Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acbuffer.h | 1
-rw-r--r--  include/acpi/acconfig.h | 4
-rw-r--r--  include/acpi/acexcep.h | 7
-rw-r--r--  include/acpi/acoutput.h | 21
-rw-r--r--  include/acpi/acpi_bus.h | 4
-rw-r--r--  include/acpi/acpi_drivers.h | 4
-rw-r--r--  include/acpi/acpiosxf.h | 6
-rw-r--r--  include/acpi/acpixf.h | 16
-rw-r--r--  include/acpi/actbl2.h | 17
-rw-r--r--  include/acpi/actypes.h | 13
-rw-r--r--  include/acpi/button.h | 4
-rw-r--r--  include/acpi/platform/acenv.h | 19
-rw-r--r--  include/acpi/platform/acenvex.h | 3
-rw-r--r--  include/acpi/platform/acmsvcex.h | 54
-rw-r--r--  include/acpi/platform/acwinex.h | 49
-rw-r--r--  include/acpi/processor.h | 59
-rw-r--r--  include/acpi/video.h | 2
-rw-r--r--  include/asm-generic/atomic-long.h | 263
-rw-r--r--  include/asm-generic/atomic.h | 11
-rw-r--r--  include/asm-generic/atomic64.h | 4
-rw-r--r--  include/asm-generic/barrier.h | 4
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 118
-rw-r--r--  include/asm-generic/early_ioremap.h | 8
-rw-r--r--  include/asm-generic/fixmap.h | 3
-rw-r--r--  include/asm-generic/io.h | 30
-rw-r--r--  include/asm-generic/memory_model.h | 6
-rw-r--r--  include/asm-generic/pci_iomap.h | 14
-rw-r--r--  include/asm-generic/qrwlock.h | 78
-rw-r--r--  include/asm-generic/rtc.h | 29
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 4
-rw-r--r--  include/asm-generic/word-at-a-time.h | 80
-rw-r--r--  include/crypto/aead.h | 172
-rw-r--r--  include/crypto/algapi.h | 3
-rw-r--r--  include/crypto/chacha20.h | 25
-rw-r--r--  include/crypto/hash.h | 5
-rw-r--r--  include/crypto/internal/aead.h | 72
-rw-r--r--  include/crypto/internal/geniv.h | 9
-rw-r--r--  include/crypto/internal/skcipher.h | 15
-rw-r--r--  include/crypto/pkcs7.h | 13
-rw-r--r--  include/crypto/poly1305.h | 41
-rw-r--r--  include/crypto/public_key.h | 18
-rw-r--r--  include/crypto/scatterwalk.h | 10
-rw-r--r--  include/crypto/skcipher.h | 391
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 7
-rw-r--r--  include/drm/drmP.h | 57
-rw-r--r--  include/drm/drm_atomic.h | 3
-rw-r--r--  include/drm/drm_atomic_helper.h | 4
-rw-r--r--  include/drm/drm_crtc.h | 83
-rw-r--r--  include/drm/drm_crtc_helper.h | 9
-rw-r--r--  include/drm/drm_dp_helper.h | 7
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 1
-rw-r--r--  include/drm/drm_fb_helper.h | 212
-rw-r--r--  include/drm/drm_modeset_lock.h | 1
-rw-r--r--  include/drm/drm_plane_helper.h | 45
-rw-r--r--  include/drm/i915_component.h | 11
-rw-r--r--  include/drm/intel-gtt.h | 4
-rw-r--r--  include/dt-bindings/clock/exynos3250.h | 1
-rw-r--r--  include/dt-bindings/clock/exynos5250.h | 1
-rw-r--r--  include/dt-bindings/clock/imx6qdl-clock.h | 5
-rw-r--r--  include/dt-bindings/clock/imx6ul-clock.h | 240
-rw-r--r--  include/dt-bindings/clock/r8a7790-clock.h | 3
-rw-r--r--  include/dt-bindings/clock/r8a7791-clock.h | 2
-rw-r--r--  include/dt-bindings/clock/r8a7793-clock.h | 164
-rw-r--r--  include/dt-bindings/clock/rk3066a-cru.h | 5
-rw-r--r--  include/dt-bindings/clock/rk3188-cru-common.h | 5
-rw-r--r--  include/dt-bindings/clock/rk3188-cru.h | 5
-rw-r--r--  include/dt-bindings/clock/rk3288-cru.h | 5
-rw-r--r--  include/dt-bindings/clock/rk3368-cru.h | 384
-rw-r--r--  include/dt-bindings/clock/zx296702-clock.h | 17
-rw-r--r--  include/dt-bindings/dma/axi-dmac.h | 48
-rw-r--r--  include/dt-bindings/dma/jz4780-dma.h | 49
-rw-r--r--  include/dt-bindings/i2c/i2c.h | 18
-rw-r--r--  include/dt-bindings/leds/leds-ns2.h | 8
-rw-r--r--  include/dt-bindings/media/c8sectpfe.h | 12
-rw-r--r--  include/dt-bindings/memory/tegra210-mc.h | 36
-rw-r--r--  include/dt-bindings/mfd/st-lpc.h | 1
-rw-r--r--  include/dt-bindings/pinctrl/am43xx.h | 1
-rw-r--r--  include/dt-bindings/pinctrl/dra.h | 20
-rw-r--r--  include/dt-bindings/pinctrl/qcom,pmic-mpp.h | 51
-rw-r--r--  include/dt-bindings/power/mt8173-power.h | 15
-rw-r--r--  include/dt-bindings/reset/altr,rst-mgr-a10.h | 110
-rw-r--r--  include/dt-bindings/reset/stih407-resets.h (renamed from include/dt-bindings/reset-controller/stih407-resets.h) | 0
-rw-r--r--  include/dt-bindings/reset/stih415-resets.h (renamed from include/dt-bindings/reset-controller/stih415-resets.h) | 0
-rw-r--r--  include/dt-bindings/reset/stih416-resets.h (renamed from include/dt-bindings/reset-controller/stih416-resets.h) | 0
-rw-r--r--  include/dt-bindings/reset/tegra124-car.h | 12
-rw-r--r--  include/keys/system_keyring.h | 7
-rw-r--r--  include/kvm/arm_arch_timer.h | 7
-rw-r--r--  include/kvm/arm_vgic.h | 45
-rw-r--r--  include/linux/acpi.h | 5
-rw-r--r--  include/linux/asn1_ber_bytecode.h | 16
-rw-r--r--  include/linux/atmel_serial.h | 240
-rw-r--r--  include/linux/atomic.h | 361
-rw-r--r--  include/linux/audit.h | 4
-rw-r--r--  include/linux/average.h | 61
-rw-r--r--  include/linux/backing-dev-defs.h | 3
-rw-r--r--  include/linux/backing-dev.h | 82
-rw-r--r--  include/linux/basic_mmio_gpio.h | 1
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 1
-rw-r--r--  include/linux/bio.h | 38
-rw-r--r--  include/linux/bitmap.h | 2
-rw-r--r--  include/linux/bitops.h | 6
-rw-r--r--  include/linux/blk-cgroup.h | 340
-rw-r--r--  include/linux/blk-mq.h | 5
-rw-r--r--  include/linux/blk_types.h | 17
-rw-r--r--  include/linux/blkdev.h | 100
-rw-r--r--  include/linux/bpf.h | 12
-rw-r--r--  include/linux/ceph/ceph_features.h | 1
-rw-r--r--  include/linux/ceph/libceph.h | 2
-rw-r--r--  include/linux/ceph/messenger.h | 6
-rw-r--r--  include/linux/ceph/msgr.h | 4
-rw-r--r--  include/linux/cgroup-defs.h | 15
-rw-r--r--  include/linux/cgroup.h | 24
-rw-r--r--  include/linux/cgroup_subsys.h | 34
-rw-r--r--  include/linux/clk-provider.h | 89
-rw-r--r--  include/linux/clk/clk-conf.h | 2
-rw-r--r--  include/linux/clk/shmobile.h | 12
-rw-r--r--  include/linux/clk/tegra.h | 3
-rw-r--r--  include/linux/clk/ti.h | 160
-rw-r--r--  include/linux/clockchips.h | 32
-rw-r--r--  include/linux/cma.h | 2
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/compiler.h | 73
-rw-r--r--  include/linux/context_tracking.h | 15
-rw-r--r--  include/linux/context_tracking_state.h | 1
-rw-r--r--  include/linux/coresight.h | 21
-rw-r--r--  include/linux/cpufeature.h | 7
-rw-r--r--  include/linux/cpufreq.h | 33
-rw-r--r--  include/linux/cpuidle.h | 1
-rw-r--r--  include/linux/cred.h | 8
-rw-r--r--  include/linux/crypto.h | 54
-rw-r--r--  include/linux/dax.h | 39
-rw-r--r--  include/linux/debugfs.h | 20
-rw-r--r--  include/linux/devfreq.h | 24
-rw-r--r--  include/linux/device-mapper.h | 4
-rw-r--r--  include/linux/device.h | 28
-rw-r--r--  include/linux/dma-contiguous.h | 4
-rw-r--r--  include/linux/dmaengine.h | 75
-rw-r--r--  include/linux/dmapool.h | 6
-rw-r--r--  include/linux/etherdevice.h | 2
-rw-r--r--  include/linux/extcon.h | 7
-rw-r--r--  include/linux/f2fs_fs.h | 16
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/fdtable.h | 4
-rw-r--r--  include/linux/filter.h | 17
-rw-r--r--  include/linux/fs.h | 57
-rw-r--r--  include/linux/fsl_devices.h | 20
-rw-r--r--  include/linux/fsl_ifc.h | 50
-rw-r--r--  include/linux/fsnotify_backend.h | 59
-rw-r--r--  include/linux/genalloc.h | 6
-rw-r--r--  include/linux/genhd.h | 33
-rw-r--r--  include/linux/gfp.h | 31
-rw-r--r--  include/linux/gpio/consumer.h | 82
-rw-r--r--  include/linux/gpio/driver.h | 37
-rw-r--r--  include/linux/gpio/machine.h | 1
-rw-r--r--  include/linux/huge_mm.h | 20
-rw-r--r--  include/linux/hugetlb.h | 17
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/i2c.h | 19
-rw-r--r--  include/linux/ieee80211.h | 2
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/iio/common/st_sensors.h | 2
-rw-r--r--  include/linux/iio/consumer.h | 2
-rw-r--r--  include/linux/iio/sysfs.h | 3
-rw-r--r--  include/linux/iio/trigger.h | 3
-rw-r--r--  include/linux/iio/triggered_buffer.h | 4
-rw-r--r--  include/linux/init_task.h | 86
-rw-r--r--  include/linux/input/touchscreen.h | 11
-rw-r--r--  include/linux/intel-iommu.h | 2
-rw-r--r--  include/linux/io-mapping.h | 2
-rw-r--r--  include/linux/io.h | 33
-rw-r--r--  include/linux/ioprio.h | 2
-rw-r--r--  include/linux/iova.h | 4
-rw-r--r--  include/linux/ipmi_smi.h | 7
-rw-r--r--  include/linux/ipv6.h | 5
-rw-r--r--  include/linux/irq.h | 100
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 13
-rw-r--r--  include/linux/irqchip/arm-gic.h | 10
-rw-r--r--  include/linux/irqchip/mips-gic.h | 14
-rw-r--r--  include/linux/irqdesc.h | 43
-rw-r--r--  include/linux/irqdomain.h | 31
-rw-r--r--  include/linux/irqhandler.h | 2
-rw-r--r--  include/linux/jbd.h | 1047
-rw-r--r--  include/linux/jbd2.h | 41
-rw-r--r--  include/linux/jbd_common.h | 46
-rw-r--r--  include/linux/jiffies.h | 39
-rw-r--r--  include/linux/jump_label.h | 259
-rw-r--r--  include/linux/kasan.h | 10
-rw-r--r--  include/linux/kernfs.h | 4
-rw-r--r--  include/linux/kexec.h | 18
-rw-r--r--  include/linux/klist.h | 1
-rw-r--r--  include/linux/kmod.h | 2
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/kvm_host.h | 36
-rw-r--r--  include/linux/libnvdimm.h | 4
-rw-r--r--  include/linux/list.h | 5
-rw-r--r--  include/linux/llist.h | 2
-rw-r--r--  include/linux/lsm_audit.h | 7
-rw-r--r--  include/linux/lsm_hooks.h | 6
-rw-r--r--  include/linux/mailbox_controller.h | 7
-rw-r--r--  include/linux/mei_cl_bus.h | 15
-rw-r--r--  include/linux/memblock.h | 4
-rw-r--r--  include/linux/memcontrol.h | 399
-rw-r--r--  include/linux/memory_hotplug.h | 5
-rw-r--r--  include/linux/mfd/88pm80x.h | 162
-rw-r--r--  include/linux/mfd/arizona/core.h | 3
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 14
-rw-r--r--  include/linux/mfd/arizona/registers.h | 257
-rw-r--r--  include/linux/mfd/axp20x.h | 67
-rw-r--r--  include/linux/mfd/da9062/core.h | 50
-rw-r--r--  include/linux/mfd/da9062/registers.h | 1108
-rw-r--r--  include/linux/mfd/da9063/core.h | 1
-rw-r--r--  include/linux/mfd/lpc_ich.h | 6
-rw-r--r--  include/linux/mfd/max77693-common.h | 49
-rw-r--r--  include/linux/mfd/max77693-private.h | 134
-rw-r--r--  include/linux/mfd/max77843-private.h | 174
-rw-r--r--  include/linux/mfd/mt6397/core.h | 1
-rw-r--r--  include/linux/mfd/palmas.h | 7
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 8
-rw-r--r--  include/linux/microchipphy.h | 73
-rw-r--r--  include/linux/miscdevice.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 8
-rw-r--r--  include/linux/mlx4/driver.h | 1
-rw-r--r--  include/linux/mlx4/qp.h | 3
-rw-r--r--  include/linux/mlx5/device.h | 10
-rw-r--r--  include/linux/mlx5/driver.h | 29
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 24
-rw-r--r--  include/linux/mm.h | 102
-rw-r--r--  include/linux/mm_types.h | 20
-rw-r--r--  include/linux/mmc/card.h | 3
-rw-r--r--  include/linux/mmc/dw_mmc.h | 9
-rw-r--r--  include/linux/mmc/host.h | 3
-rw-r--r--  include/linux/mmu_notifier.h | 46
-rw-r--r--  include/linux/mmzone.h | 31
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/mpls_iptunnel.h | 6
-rw-r--r--  include/linux/msi.h | 109
-rw-r--r--  include/linux/mtd/map.h | 2
-rw-r--r--  include/linux/net.h | 8
-rw-r--r--  include/linux/netdevice.h | 184
-rw-r--r--  include/linux/netfilter.h | 44
-rw-r--r--  include/linux/netfilter/nf_conntrack_zones_common.h | 23
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 3
-rw-r--r--  include/linux/netfilter/x_tables.h | 8
-rw-r--r--  include/linux/netfilter_bridge.h | 12
-rw-r--r--  include/linux/netfilter_ipv6.h | 18
-rw-r--r--  include/linux/netlink.h | 13
-rw-r--r--  include/linux/nfs4.h | 18
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/nfs_fs_sb.h | 5
-rw-r--r--  include/linux/nfs_xdr.h | 8
-rw-r--r--  include/linux/nmi.h | 21
-rw-r--r--  include/linux/ntb.h | 9
-rw-r--r--  include/linux/ntb_transport.h | 1
-rw-r--r--  include/linux/nvme.h | 22
-rw-r--r--  include/linux/nvmem-consumer.h | 157
-rw-r--r--  include/linux/nvmem-provider.h | 47
-rw-r--r--  include/linux/of.h | 3
-rw-r--r--  include/linux/of_gpio.h | 4
-rw-r--r--  include/linux/of_irq.h | 1
-rw-r--r--  include/linux/of_platform.h | 9
-rw-r--r--  include/linux/oid_registry.h | 7
-rw-r--r--  include/linux/omap-dma.h | 2
-rw-r--r--  include/linux/oom.h | 38
-rw-r--r--  include/linux/page-flags.h | 11
-rw-r--r--  include/linux/page-isolation.h | 5
-rw-r--r--  include/linux/page_ext.h | 4
-rw-r--r--  include/linux/page_idle.h | 110
-rw-r--r--  include/linux/pci-ats.h | 49
-rw-r--r--  include/linux/pci.h | 66
-rw-r--r--  include/linux/pci_ids.h | 9
-rw-r--r--  include/linux/percpu-defs.h | 6
-rw-r--r--  include/linux/percpu-rwsem.h | 20
-rw-r--r--  include/linux/perf/arm_pmu.h | 154
-rw-r--r--  include/linux/perf_event.h | 10
-rw-r--r--  include/linux/phy.h | 20
-rw-r--r--  include/linux/phy_fixed.h | 8
-rw-r--r--  include/linux/platform_data/atmel.h | 12
-rw-r--r--  include/linux/platform_data/atmel_mxt_ts.h (renamed from include/linux/i2c/atmel_mxt_ts.h) | 12
-rw-r--r--  include/linux/platform_data/clk-ux500.h | 12
-rw-r--r--  include/linux/platform_data/gpio-em.h | 11
-rw-r--r--  include/linux/platform_data/i2c-mux-reg.h | 44
-rw-r--r--  include/linux/platform_data/itco_wdt.h | 19
-rw-r--r--  include/linux/platform_data/leds-kirkwood-ns2.h | 14
-rw-r--r--  include/linux/platform_data/lp855x.h | 2
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h | 1
-rw-r--r--  include/linux/platform_data/pixcir_i2c_ts.h (renamed from include/linux/input/pixcir_ts.h) | 1
-rw-r--r--  include/linux/platform_data/spi-davinci.h | 1
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h | 20
-rw-r--r--  include/linux/platform_data/video-ep93xx.h | 8
-rw-r--r--  include/linux/platform_data/zforce_ts.h | 3
-rw-r--r--  include/linux/pm_domain.h | 9
-rw-r--r--  include/linux/pm_opp.h | 42
-rw-r--r--  include/linux/pm_qos.h | 5
-rw-r--r--  include/linux/pm_runtime.h | 6
-rw-r--r--  include/linux/pmem.h | 115
-rw-r--r--  include/linux/poison.h | 11
-rw-r--r--  include/linux/printk.h | 14
-rw-r--r--  include/linux/property.h | 4
-rw-r--r--  include/linux/proportions.h | 2
-rw-r--r--  include/linux/psci.h | 52
-rw-r--r--  include/linux/ptrace.h | 1
-rw-r--r--  include/linux/pwm.h | 99
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/quotaops.h | 5
-rw-r--r--  include/linux/rcupdate.h | 143
-rw-r--r--  include/linux/rcutiny.h | 10
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/linux/regmap.h | 385
-rw-r--r--  include/linux/regulator/consumer.h | 16
-rw-r--r--  include/linux/regulator/da9211.h | 19
-rw-r--r--  include/linux/regulator/driver.h | 1
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/regulator/mt6311.h | 29
-rw-r--r--  include/linux/reset.h | 14
-rw-r--r--  include/linux/rmap.h | 3
-rw-r--r--  include/linux/scatterlist.h | 9
-rw-r--r--  include/linux/sched.h | 202
-rw-r--r--  include/linux/sched/prio.h | 12
-rw-r--r--  include/linux/seccomp.h | 2
-rw-r--r--  include/linux/seq_file.h | 23
-rw-r--r--  include/linux/serial_8250.h | 7
-rw-r--r--  include/linux/serio.h | 2
-rw-r--r--  include/linux/shdma-base.h | 5
-rw-r--r--  include/linux/skbuff.h | 153
-rw-r--r--  include/linux/slab.h | 10
-rw-r--r--  include/linux/smpboot.h | 11
-rw-r--r--  include/linux/soc/dove/pmu.h | 6
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 26
-rw-r--r--  include/linux/soc/qcom/smd-rpm.h | 35
-rw-r--r--  include/linux/soc/qcom/smd.h | 46
-rw-r--r--  include/linux/soc/qcom/smem.h | 11
-rw-r--r--  include/linux/spi/spi.h | 64
-rw-r--r--  include/linux/spinlock.h | 40
-rw-r--r--  include/linux/stmmac.h | 22
-rw-r--r--  include/linux/stop_machine.h | 28
-rw-r--r--  include/linux/string.h | 3
-rw-r--r--  include/linux/string_helpers.h | 14
-rw-r--r--  include/linux/sunrpc/addr.h | 27
-rw-r--r--  include/linux/sunrpc/auth.h | 8
-rw-r--r--  include/linux/sunrpc/cache.h | 9
-rw-r--r--  include/linux/sunrpc/svc.h | 68
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 83
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/sunrpc/xprtrdma.h | 2
-rw-r--r--  include/linux/swap.h | 23
-rw-r--r--  include/linux/swapops.h | 37
-rw-r--r--  include/linux/syscalls.h | 3
-rw-r--r--  include/linux/thermal.h | 34
-rw-r--r--  include/linux/ti_wilink_st.h | 1
-rw-r--r--  include/linux/tick.h | 34
-rw-r--r--  include/linux/time64.h | 35
-rw-r--r--  include/linux/timekeeping.h | 9
-rw-r--r--  include/linux/trace_events.h | 7
-rw-r--r--  include/linux/tty.h | 12
-rw-r--r--  include/linux/tty_driver.h | 2
-rw-r--r--  include/linux/types.h | 3
-rw-r--r--  include/linux/uaccess.h | 2
-rw-r--r--  include/linux/uprobes.h | 17
-rw-r--r--  include/linux/usb/chipidea.h | 15
-rw-r--r--  include/linux/usb/composite.h | 2
-rw-r--r--  include/linux/usb/gadget.h | 198
-rw-r--r--  include/linux/usb/hcd.h | 6
-rw-r--r--  include/linux/usb/msm_hsusb.h | 9
-rw-r--r--  include/linux/usb/of.h | 7
-rw-r--r--  include/linux/usb/otg.h | 15
-rw-r--r--  include/linux/usb/renesas_usbhs.h | 2
-rw-r--r--  include/linux/userfaultfd_k.h | 85
-rw-r--r--  include/linux/verify_pefile.h | 6
-rw-r--r--  include/linux/watchdog.h | 8
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/linux/zbud.h | 2
-rw-r--r--  include/linux/zpool.h | 6
-rw-r--r--  include/linux/zsmalloc.h | 6
-rw-r--r--  include/media/media-devnode.h | 4
-rw-r--r--  include/media/omap3isp.h | 158
-rw-r--r--  include/media/rc-core.h | 6
-rw-r--r--  include/media/rc-map.h | 38
-rw-r--r--  include/media/tc358743.h | 131
-rw-r--r--  include/media/v4l2-async.h | 8
-rw-r--r--  include/media/v4l2-ctrls.h | 1018
-rw-r--r--  include/media/v4l2-dv-timings.h | 141
-rw-r--r--  include/media/v4l2-event.h | 47
-rw-r--r--  include/media/v4l2-flash-led-class.h | 12
-rw-r--r--  include/media/v4l2-mediabus.h | 4
-rw-r--r--  include/media/v4l2-mem2mem.h | 20
-rw-r--r--  include/media/v4l2-subdev.h | 376
-rw-r--r--  include/media/videobuf-core.h | 2
-rw-r--r--  include/media/videobuf2-core.h | 10
-rw-r--r--  include/media/videobuf2-memops.h | 14
-rw-r--r--  include/misc/cxl.h | 10
-rw-r--r--  include/net/6lowpan.h | 23
-rw-r--r--  include/net/act_api.h | 16
-rw-r--r--  include/net/addrconf.h | 35
-rw-r--r--  include/net/af_unix.h | 2
-rw-r--r--  include/net/bluetooth/hci_core.h | 32
-rw-r--r--  include/net/bluetooth/l2cap.h | 2
-rw-r--r--  include/net/bond_options.h | 1
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/cfg80211.h | 3
-rw-r--r--  include/net/cfg802154.h | 10
-rw-r--r--  include/net/checksum.h | 8
-rw-r--r--  include/net/cls_cgroup.h | 29
-rw-r--r--  include/net/dsa.h | 33
-rw-r--r--  include/net/dst.h | 29
-rw-r--r--  include/net/dst_metadata.h | 141
-rw-r--r--  include/net/fib_rules.h | 3
-rw-r--r--  include/net/flow.h | 30
-rw-r--r--  include/net/flow_dissector.h | 67
-rw-r--r--  include/net/geneve.h | 35
-rw-r--r--  include/net/gre.h | 92
-rw-r--r--  include/net/gro_cells.h | 18
-rw-r--r--  include/net/inet_common.h | 3
-rw-r--r--  include/net/inet_hashtables.h | 4
-rw-r--r--  include/net/inet_timewait_sock.h | 22
-rw-r--r--  include/net/inetpeer.h | 118
-rw-r--r--  include/net/ip.h | 31
-rw-r--r--  include/net/ip6_fib.h | 5
-rw-r--r--  include/net/ip6_tunnel.h | 17
-rw-r--r--  include/net/ip_fib.h | 37
-rw-r--r--  include/net/ip_tunnels.h | 147
-rw-r--r--  include/net/ip_vs.h | 23
-rw-r--r--  include/net/ipv6.h | 76
-rw-r--r--  include/net/lwtunnel.h | 175
-rw-r--r--  include/net/mac80211.h | 73
-rw-r--r--  include/net/mac802154.h | 17
-rw-r--r--  include/net/mpls_iptunnel.h | 29
-rw-r--r--  include/net/ndisc.h | 3
-rw-r--r--  include/net/neighbour.h | 1
-rw-r--r--  include/net/net_namespace.h | 3
-rw-r--r--  include/net/netfilter/ipv4/nf_dup_ipv4.h | 7
-rw-r--r--  include/net/netfilter/ipv6/nf_dup_ipv6.h | 7
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 10
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 11
-rw-r--r--  include/net/netfilter/nf_conntrack_labels.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_zones.h | 86
-rw-r--r--  include/net/netfilter/nft_dup.h | 9
-rw-r--r--  include/net/netns/ipv6.h | 1
-rw-r--r--  include/net/netns/netfilter.h | 1
-rw-r--r--  include/net/nfc/nci_core.h | 3
-rw-r--r--  include/net/nfc/nfc.h | 41
-rw-r--r--  include/net/nl802154.h | 4
-rw-r--r--  include/net/pkt_sched.h | 4
-rw-r--r--  include/net/route.h | 7
-rw-r--r--  include/net/rtnetlink.h | 1
-rw-r--r--  include/net/sch_generic.h | 32
-rw-r--r--  include/net/sock.h | 51
-rw-r--r--  include/net/switchdev.h | 10
-rw-r--r--  include/net/tc_act/tc_bpf.h | 2
-rw-r--r--  include/net/tc_act/tc_gact.h | 7
-rw-r--r--  include/net/tc_act/tc_mirred.h | 2
-rw-r--r--  include/net/tcp.h | 24
-rw-r--r--  include/net/timewait_sock.h | 3
-rw-r--r--  include/net/udp_tunnel.h | 7
-rw-r--r--  include/net/vrf.h | 178
-rw-r--r--  include/net/vxlan.h | 90
-rw-r--r--  include/net/xfrm.h | 7
-rw-r--r--  include/rdma/ib_cm.h | 25
-rw-r--r--  include/rdma/ib_mad.h | 82
-rw-r--r--  include/rdma/ib_pack.h | 2
-rw-r--r--  include/rdma/ib_smi.h | 47
-rw-r--r--  include/rdma/ib_verbs.h | 249
-rw-r--r--  include/rdma/opa_port_info.h | 433
-rw-r--r--  include/rdma/opa_smi.h | 47
-rw-r--r--  include/rdma/rdma_netlink.h | 7
-rw-r--r--  include/scsi/scsi_common.h | 5
-rw-r--r--  include/scsi/scsi_device.h | 30
-rw-r--r--  include/scsi/scsi_dh.h | 29
-rw-r--r--  include/scsi/scsi_eh.h | 6
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 1
-rw-r--r--  include/soc/tegra/fuse.h | 6
-rw-r--r--  include/soc/tegra/mc.h | 9
-rw-r--r--  include/soc/tegra/pmc.h | 5
-rw-r--r--  include/sound/ac97_codec.h | 2
-rw-r--r--  include/sound/hda_i915.h | 7
-rw-r--r--  include/sound/hda_register.h | 4
-rw-r--r--  include/sound/hdaudio.h | 19
-rw-r--r--  include/sound/hdaudio_ext.h | 71
-rw-r--r--  include/sound/rcar_snd.h | 14
-rw-r--r--  include/sound/rt298.h | 20
-rw-r--r--  include/sound/soc-dapm.h | 84
-rw-r--r--  include/sound/soc-topology.h | 13
-rw-r--r--  include/sound/soc.h | 35
-rw-r--r--  include/sound/wm8904.h | 2
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 14
-rw-r--r--  include/target/iscsi/iscsi_target_stat.h | 2
-rw-r--r--  include/target/iscsi/iscsi_transport.h | 2
-rw-r--r--  include/target/target_core_backend.h | 2
-rw-r--r--  include/target/target_core_base.h | 28
-rw-r--r--  include/target/target_core_fabric.h | 14
-rw-r--r--  include/trace/events/asoc.h | 53
-rw-r--r--  include/trace/events/ext3.h | 866
-rw-r--r--  include/trace/events/f2fs.h | 12
-rw-r--r--  include/trace/events/fib.h | 113
-rw-r--r--  include/trace/events/jbd.h | 194
-rw-r--r--  include/trace/events/kvm.h | 30
-rw-r--r--  include/trace/events/rcu.h | 1
-rw-r--r--  include/trace/events/sched.h | 30
-rw-r--r--  include/trace/events/spmi.h | 135
-rw-r--r--  include/trace/events/task.h | 2
-rw-r--r--  include/trace/events/thermal_power_allocator.h | 6
-rw-r--r--  include/trace/events/tlb.h | 3
-rw-r--r--  include/trace/events/v4l2.h | 257
-rw-r--r--  include/trace/events/writeback.h | 180
-rw-r--r--  include/uapi/asm-generic/signal.h | 2
-rw-r--r--  include/uapi/asm-generic/unistd.h | 8
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 7
-rw-r--r--  include/uapi/drm/i915_drm.h | 16
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h | 38
-rw-r--r--  include/uapi/linux/Kbuild | 3
-rw-r--r--  include/uapi/linux/audit.h | 8
-rw-r--r--  include/uapi/linux/aufs_type.h | 2
-rw-r--r--  include/uapi/linux/bpf.h | 29
-rw-r--r--  include/uapi/linux/dlm_device.h | 2
-rw-r--r--  include/uapi/linux/elf-em.h | 3
-rw-r--r--  include/uapi/linux/ethtool.h | 5
-rw-r--r--  include/uapi/linux/fib_rules.h | 2
-rw-r--r--  include/uapi/linux/gsmmux.h | 1
-rw-r--r--  include/uapi/linux/if_bridge.h | 1
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 16
-rw-r--r--  include/uapi/linux/if_packet.h | 3
-rw-r--r--  include/uapi/linux/if_tunnel.h | 1
-rw-r--r--  include/uapi/linux/ila.h | 15
-rw-r--r--  include/uapi/linux/ip_vs.h | 5
-rw-r--r--  include/uapi/linux/ipv6.h | 3
-rw-r--r--  include/uapi/linux/kernel-page-flags.h | 1
-rw-r--r--  include/uapi/linux/kvm.h | 7
-rw-r--r--  include/uapi/linux/lwtunnel.h | 43
-rw-r--r--  include/uapi/linux/mei.h | 19
-rw-r--r--  include/uapi/linux/membarrier.h | 53
-rw-r--r--  include/uapi/linux/mpls.h | 2
-rw-r--r--  include/uapi/linux/mpls_iptunnel.h | 28
-rw-r--r--  include/uapi/linux/ndctl.h | 12
-rw-r--r--  include/uapi/linux/neighbour.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nf_conntrack_sctp.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 23
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_conntrack.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_cttimeout.h | 2
-rw-r--r--  include/uapi/linux/netfilter/xt_CT.h | 8
-rw-r--r--  include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h | 4
-rw-r--r--  include/uapi/linux/netlink.h | 1
-rw-r--r--  include/uapi/linux/nfs4.h | 2
-rw-r--r--  include/uapi/linux/nfsacl.h | 1
-rw-r--r--  include/uapi/linux/nvme.h | 1
-rw-r--r--  include/uapi/linux/openvswitch.h | 55
-rw-r--r--  include/uapi/linux/perf_event.h | 35
-rw-r--r--  include/uapi/linux/prctl.h | 7
-rw-r--r--  include/uapi/linux/ptrace.h | 6
-rw-r--r--  include/uapi/linux/rtnetlink.h | 15
-rw-r--r--  include/uapi/linux/sched.h | 9
-rw-r--r--  include/uapi/linux/securebits.h | 11
-rw-r--r--  include/uapi/linux/snmp.h | 2
-rw-r--r--  include/uapi/linux/target_core_user.h | 4
-rw-r--r--  include/uapi/linux/toshiba.h | 32
-rw-r--r--  include/uapi/linux/usb/ch9.h | 12
-rw-r--r--  include/uapi/linux/userfaultfd.h | 167
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 4
-rw-r--r--  include/uapi/linux/vsp1.h | 2
-rw-r--r--  include/uapi/misc/cxl.h | 4
-rw-r--r--  include/uapi/rdma/Kbuild | 1
-rw-r--r--  include/uapi/rdma/hfi/Kbuild | 2
-rw-r--r--  include/uapi/rdma/hfi/hfi1_user.h | 427
-rw-r--r--  include/uapi/rdma/rdma_netlink.h | 82
-rw-r--r--  include/uapi/scsi/Kbuild | 1
-rw-r--r--  include/uapi/scsi/cxlflash_ioctl.h | 174
-rw-r--r--  include/uapi/xen/privcmd.h | 4
-rw-r--r--  include/video/kyro.h | 4
-rw-r--r--  include/video/samsung_fimd.h | 1
-rw-r--r--  include/video/vga.h | 2
-rw-r--r--  include/xen/events.h | 1
-rw-r--r--  include/xen/interface/io/netif.h | 8
-rw-r--r--  include/xen/interface/platform.h | 18
-rw-r--r--  include/xen/interface/xen.h | 37
-rw-r--r--  include/xen/interface/xenpmu.h | 94
-rw-r--r--  include/xen/page.h | 8
-rw-r--r--  include/xen/xen-ops.h | 10
579 files changed, 16648 insertions, 6885 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 6b040f4dd..fcf9080ea 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -147,6 +147,7 @@ struct acpi_pld_info {
* (Intended for BIOS use only)
*/
#define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */
+#define ACPI_PLD_REV2_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
#define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
/* First 32-bit dword, bits 0:32 */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index cebb8a71a..eb9bdd22a 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -146,10 +146,6 @@
#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
-/* Maximum number of While() loop iterations before forced abort */
-
-#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
-
/* Maximum sleep allowed via Sleep() operator */
#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 11c3a011d..9f20eb4ac 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -192,8 +192,9 @@ struct acpi_exception_info {
#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
-#define AE_CODE_AML_MAX 0x0021
+#define AE_CODE_AML_MAX 0x0022
/*
* Internal exceptions used for control
@@ -355,7 +356,9 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
"A memory, I/O, or PCI configuration address is invalid"),
EXCEP_TXT("AE_AML_INFINITE_LOOP",
- "An apparent infinite AML While loop, method was aborted")
+ "An apparent infinite AML While loop, method was aborted"),
+ EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
+ "A namespace node is uninitialized or unresolved")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index f56de8c5d..908d4f9c3 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -88,7 +88,8 @@
#define ACPI_LV_DEBUG_OBJECT 0x00000002
#define ACPI_LV_INFO 0x00000004
#define ACPI_LV_REPAIR 0x00000008
-#define ACPI_LV_ALL_EXCEPTIONS 0x0000000F
+#define ACPI_LV_TRACE_POINT 0x00000010
+#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F
/* Trace verbosity level 1 [Standard Trace Level] */
@@ -147,6 +148,7 @@
#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
+#define ACPI_DB_TRACE_POINT ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT)
#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
/* Trace level -- also used in the global "DebugLevel" */
@@ -182,6 +184,20 @@
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
+/*
+ * Global trace flags
+ */
+#define ACPI_TRACE_ENABLED ((u32) 4)
+#define ACPI_TRACE_ONESHOT ((u32) 2)
+#define ACPI_TRACE_OPCODE ((u32) 1)
+
+/* Defaults for trace debugging level/layer */
+
+#define ACPI_TRACE_LEVEL_ALL ACPI_LV_ALL
+#define ACPI_TRACE_LAYER_ALL 0x000001FF
+#define ACPI_TRACE_LEVEL_DEFAULT ACPI_LV_TRACE_POINT
+#define ACPI_TRACE_LAYER_DEFAULT ACPI_EXECUTER
+
#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
/*
* The module name is used primarily for error and debug messages.
@@ -432,6 +448,8 @@
#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
+#define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d)
+
#else /* ACPI_DEBUG_OUTPUT */
/*
* This is the non-debug case -- make everything go away,
@@ -453,6 +471,7 @@
#define ACPI_DUMP_PATHNAME(a, b, c, d)
#define ACPI_DUMP_BUFFER(a, b)
#define ACPI_IS_DEBUG_ENABLED(level, component) 0
+#define ACPI_TRACE_POINT(a, b, c, d)
/* Return macros must have a return statement at the minimum */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 83061cac7..5ba8fb64f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index ea6428b7d..29c691265 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d02df0a49..a54ad1cc9 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -430,4 +430,10 @@ long acpi_os_get_file_offset(ACPI_FILE file);
acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+ u8 begin, u8 *aml, char *pathname);
+#endif
+
#endif /* __ACPIOSXF_H__ */
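The new acpi_os_trace_point() prototype above is an OSL hook that a host can either implement or override via ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point. A hypothetical, purely illustrative host implementation that just logs the event through acpi_os_printf() might look like this (a real OSL would do more):

    /* Sketch only: trivial host-side trace hook, not any real OSL. */
    void acpi_os_trace_point(acpi_trace_event_type type,
                             u8 begin, u8 *aml, char *pathname)
    {
            acpi_os_printf("ACPI trace %s: type %u, aml %p, path %s\n",
                           begin ? "begin" : "end", (u32)type, aml,
                           pathname ? pathname : "(none)");
    }
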
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e8ec18a4a..c33eeabde 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20150619
+#define ACPI_CA_VERSION 0x20150818
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -251,7 +251,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
* traced each time it is executed.
*/
ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
-ACPI_INIT_GLOBAL(acpi_name, acpi_gbl_trace_method_name, 0);
+ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
/*
* Runtime configuration of debug output control masks. We want the debug
@@ -504,7 +506,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_object_handler handler,
void **data))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_debug_trace(char *name, u32 debug_level,
+ acpi_debug_trace(const char *name, u32 debug_level,
u32 debug_layer, u32 flags))
/*
@@ -907,9 +909,17 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
const char *module_name,
u32 component_id,
const char *format, ...))
+
+ACPI_DBG_DEPENDENT_RETURN_VOID(void
+ acpi_trace_point(acpi_trace_event_type type,
+ u8 begin,
+ u8 *aml, char *pathname))
ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
acpi_log_error(const char *format, ...))
+ acpi_status acpi_initialize_debugger(void);
+
+void acpi_terminate_debugger(void);
/*
* Divergences
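With acpi_debug_trace() now taking a const char * method name and the trace level/layer globals added above, enabling a method trace is a single call. The sketch below is illustrative only; the method pathname and the wrapper name are assumptions, and a real caller would normally be driven from a debug interface rather than hard-coded.

    /* Sketch only: request a one-shot trace of one control method. */
    static acpi_status example_enable_method_trace(void)
    {
            return acpi_debug_trace("\\_SB.PCI0._INI",
                                    ACPI_TRACE_LEVEL_DEFAULT,
                                    ACPI_TRACE_LAYER_DEFAULT,
                                    ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);
    }
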
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a948fc586..6e28f544b 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1186,20 +1186,29 @@ enum acpi_spmi_interface_types {
* December 19, 2014
*
* NOTE: There are two versions of the table with the same signature --
- * the client version and the server version.
+ * the client version and the server version. The common platform_class
+ * field is used to differentiate the two types of tables.
*
******************************************************************************/
-struct acpi_table_tcpa_client {
+struct acpi_table_tcpa_hdr {
struct acpi_table_header header; /* Common ACPI table header */
u16 platform_class;
+};
+
+/*
+ * Values for platform_class above.
+ * This is how the client and server subtables are differentiated
+ */
+#define ACPI_TCPA_CLIENT_TABLE 0
+#define ACPI_TCPA_SERVER_TABLE 1
+
+struct acpi_table_tcpa_client {
u32 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
};
struct acpi_table_tcpa_server {
- struct acpi_table_header header; /* Common ACPI table header */
- u16 platform_class;
u16 reserved;
u64 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
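Since the TCPA client and server layouts now share struct acpi_table_tcpa_hdr, a consumer dispatches on platform_class before interpreting the rest of the table. The sketch below is illustrative only: it assumes the usual byte-packed ACPI table layout (so the class-specific fields start right after the shared header), and the helper name and out-parameter are hypothetical.

    /* Sketch only: pick the subtable layout from the shared platform_class. */
    static acpi_status example_tcpa_log_address(struct acpi_table_tcpa_hdr *tcpa,
                                                u64 *log_address)
    {
            void *sub = (u8 *)tcpa + sizeof(*tcpa);

            switch (tcpa->platform_class) {
            case ACPI_TCPA_CLIENT_TABLE:
                    *log_address = ((struct acpi_table_tcpa_client *)sub)->log_address;
                    return AE_OK;
            case ACPI_TCPA_SERVER_TABLE:
                    *log_address = ((struct acpi_table_tcpa_server *)sub)->log_address;
                    return AE_OK;
            default:
                    return AE_BAD_DATA;
            }
    }
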
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index c2a41d223..f914958c4 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -662,6 +662,7 @@ typedef u32 acpi_object_type;
#define ACPI_TYPE_DEBUG_OBJECT 0x10
#define ACPI_TYPE_EXTERNAL_MAX 0x10
+#define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1)
/*
* These are object types that do not map directly to the ACPI
@@ -683,6 +684,7 @@ typedef u32 acpi_object_type;
#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */
#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */
+#define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1)
/*
* These are special object types that never appear in
@@ -985,7 +987,8 @@ struct acpi_buffer {
*/
#define ACPI_FULL_PATHNAME 0
#define ACPI_SINGLE_NAME 1
-#define ACPI_NAME_TYPE_MAX 1
+#define ACPI_FULL_PATHNAME_NO_TRAILING 2
+#define ACPI_NAME_TYPE_MAX 2
/*
* Predefined Namespace items
@@ -1246,6 +1249,14 @@ struct acpi_memory_list {
#endif
};
+/* Definitions of trace event types */
+
+typedef enum {
+ ACPI_TRACE_AML_METHOD,
+ ACPI_TRACE_AML_OPCODE,
+ ACPI_TRACE_AML_REGION
+} acpi_trace_event_type;
+
/* Definitions of _OSI support */
#define ACPI_VENDOR_STRINGS 0x01
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 97eea0e4c..1cad8b2d4 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -3,7 +3,7 @@
#include <linux/notifier.h>
-#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_BUTTON)
extern int acpi_lid_notifier_register(struct notifier_block *nb);
extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
extern int acpi_lid_open(void);
@@ -20,6 +20,6 @@ static inline int acpi_lid_open(void)
{
return 1;
}
-#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
+#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */
#endif /* ACPI_BUTTON_H */
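IS_ENABLED(CONFIG_ACPI_BUTTON) is true for both =y and =m, which is exactly what the old defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) test spelled out; it can also be used in ordinary C conditions so the compiler discards the dead branch. A small illustrative sketch (the function name is hypothetical):

    /* Sketch only: compile-time config check usable as a runtime condition. */
    static bool example_lid_is_open(void)
    {
            if (!IS_ENABLED(CONFIG_ACPI_BUTTON))
                    return true;            /* stub behaviour: assume open */
            return acpi_lid_open() == 1;
    }
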
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 3cedd4394..ec00e2bb0 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -70,13 +70,14 @@
#ifdef ACPI_ASL_COMPILER
#define ACPI_APPLICATION
-#define ACPI_DISASSEMBLER
#define ACPI_DEBUG_OUTPUT
#define ACPI_CONSTANT_EVAL_ONLY
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
#define ACPI_32BIT_PHYSICAL_ADDRESS
+
+#define ACPI_DISASSEMBLER 1
#endif
/* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -89,8 +90,8 @@
#endif
/*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
- * All single threaded.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
+ * configuration. All single threaded.
*/
#if (defined ACPI_BIN_APP) || \
(defined ACPI_DUMP_APP) || \
@@ -123,7 +124,7 @@
#define ACPI_USE_NATIVE_RSDP_POINTER
#endif
-/* acpi_dump configuration. Native mapping used if provied by OSPMs */
+/* acpi_dump configuration. Native mapping used if provided by the host */
#ifdef ACPI_DUMP_APP
#define ACPI_USE_NATIVE_MEMORY_MAPPING
@@ -151,12 +152,12 @@
#define ACPI_USE_LOCAL_CACHE
#endif
-/* Common debug support */
+/* Common debug/disassembler support */
#ifdef ACPI_FULL_DEBUG
-#define ACPI_DEBUGGER
#define ACPI_DEBUG_OUTPUT
-#define ACPI_DISASSEMBLER
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
#endif
@@ -323,8 +324,8 @@
* ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
* the standard header files may be used.
*
- * The ACPICA subsystem only uses low level C library functions that do not call
- * operating system services and may therefore be inlined in the code.
+ * The ACPICA subsystem only uses low level C library functions that do not
+ * call operating system services and may therefore be inlined in the code.
*
* It may be necessary to tailor these include files to the target
* generation environment.
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 0a7dc8e58..2f296cb5f 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinuxex.h>
+#elif defined(WIN32)
+#include "acwinex.h"
+
#elif defined(_AED_EFI)
#include "acefiex.h"
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
new file mode 100644
index 000000000..b64797488
--- /dev/null
+++ b/include/acpi/platform/acmsvcex.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Name: acmsvcex.h - Extra VC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMSVCEX_H__
+#define __ACMSVCEX_H__
+
+/* Debug support. */
+
+#ifdef _DEBUG
+#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
+#include <crtdbg.h>
+#endif
+
+#endif /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
new file mode 100644
index 000000000..6ed1d7135
--- /dev/null
+++ b/include/acpi/platform/acwinex.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Name: acwinex.h - Extra OS specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACWINEX_H__
+#define __ACWINEX_H__
+
+/* Windows uses VC */
+
+#endif /* __ACWINEX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 4188a4d3b..ff5f135f1 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
- acpi_processor_performance
- *performance,
- unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);
/* note: this locks both the calling module and the processor module
if a _PPC object exists, rmmod is disallowed then */
@@ -318,6 +315,7 @@ int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
void acpi_processor_set_pdc(acpi_handle handle);
/* in processor_throttling.c */
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
extern int acpi_processor_set_throttling(struct acpi_processor *pr,
@@ -330,14 +328,59 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
unsigned long action);
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
+#else
+static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+ return 0;
+}
+
+static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force)
+{
+ return -ENODEV;
+}
+
+static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+ unsigned long action) {}
+
+static inline void acpi_processor_throttling_init(void) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
/* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
int acpi_processor_power_init(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-extern struct cpuidle_driver acpi_idle_driver;
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
-#ifdef CONFIG_PM_SLEEP
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
+#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
void acpi_processor_syscore_init(void);
void acpi_processor_syscore_exit(void);
#else
@@ -348,7 +391,7 @@ static inline void acpi_processor_syscore_exit(void) {}
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
void acpi_thermal_cpufreq_init(void);
void acpi_thermal_cpufreq_exit(void);
#else
@@ -360,6 +403,6 @@ static inline void acpi_thermal_cpufreq_exit(void)
{
return;
}
-#endif
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
#endif
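The static inline stubs added above return -ENODEV (or 0) when CONFIG_ACPI_CPU_FREQ_PSS or CONFIG_ACPI_PROCESSOR_IDLE is not set, so callers can be written without #ifdefs. A minimal caller sketch, illustrative only (the function name is hypothetical):

    /* Sketch only: treat -ENODEV from the stubs as "support not built in". */
    static int example_processor_idle_setup(struct acpi_processor *pr)
    {
            int ret = acpi_processor_power_init(pr);

            if (ret == -ENODEV)
                    return 0;       /* idle handling not compiled in; carry on */
            return ret;
    }
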
diff --git a/include/acpi/video.h b/include/acpi/video.h
index e840b294c..c62392d9b 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -24,7 +24,7 @@ enum acpi_backlight_type {
acpi_backlight_native,
};
-#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_VIDEO)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf020..a94cbebbc 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x) atomic64 ## x
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else /* BITS_PER_LONG == 64 */
+#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic_t *v = (atomic_t *)l;
-
- atomic_set(v, i);
-}
+#define ATOMIC_LONG_PFX(x) atomic ## x
+
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo) \
+static inline long atomic_long_read##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
+}
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
+
+#undef ATOMIC_LONG_READ_OP
+
+#define ATOMIC_LONG_SET_OP(mo) \
+static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_set##mo)(v, i); \
+}
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+ (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
static inline void atomic_long_inc(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_inc(v);
+ ATOMIC_LONG_PFX(_inc)(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_dec(v);
+ ATOMIC_LONG_PFX(_dec)(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_add(i, v);
+ ATOMIC_LONG_PFX(_add)(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_sub(i, v);
+ ATOMIC_LONG_PFX(_sub)(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_sub_and_test(i, v);
+ return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_dec_and_test(v);
+ return ATOMIC_LONG_PFX(_dec_and_test)(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_inc_and_test(v);
+ return ATOMIC_LONG_PFX(_inc_and_test)(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_sub_return(i, v);
+ return ATOMIC_LONG_PFX(_add_negative)(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_inc_return(v);
+ return (long)ATOMIC_LONG_PFX(_inc_return)(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_dec_return(v);
+ return (long)ATOMIC_LONG_PFX(_dec_return)(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_add_unless(v, a, u);
+ return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
}
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(v), (new)))
-
-#endif /* BITS_PER_LONG == 64 */
+#define atomic_long_inc_not_zero(l) \
+ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
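The rewrite above replaces two near-identical copies of the atomic_long wrappers with generator macros: ATOMIC_LONG_PFX() token-pastes either "atomic64" or "atomic" depending on BITS_PER_LONG, and the *_OP macros stamp out relaxed/acquire/release variants. For reference, this is what ATOMIC_LONG_READ_OP(_acquire) expands to on a 64-bit build:

    /* Expansion sketch of ATOMIC_LONG_READ_OP(_acquire) with BITS_PER_LONG == 64. */
    static inline long atomic_long_read_acquire(atomic_long_t *l)
    {
            atomic64_t *v = (atomic64_t *)l;

            return (long)atomic64_read_acquire(v);
    }
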
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2b1..d4d7e337f 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif
-#ifndef atomic_clear_mask
+#ifndef atomic_and
ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v) atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_OP_RETURN
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c86c..d48e78cca 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc2d..b42afada1 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
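The barrier.h change swaps ACCESS_ONCE() for READ_ONCE()/WRITE_ONCE() inside smp_load_acquire()/smp_store_release(); the macros still pair a once-only access with smp_mb(). Their usual pairing is the message-passing pattern sketched below; the structure and function names are illustrative only.

    /* Sketch only: hand off a data field guarded by a flag. */
    struct example_msg {
            int data;
            int ready;
    };

    static void example_publish(struct example_msg *m, int value)
    {
            m->data = value;
            smp_store_release(&m->ready, 1);    /* orders data before ready */
    }

    static int example_consume(struct example_msg *m, int *value)
    {
            if (!smp_load_acquire(&m->ready))   /* orders ready before data */
                    return 0;
            *value = m->data;
            return 1;
    }
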
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 940d5ec12..b1bc954ec 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -6,6 +6,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
+#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
@@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+#endif
+
#endif
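The coherent-allocation helpers consolidated above are used the same way as before from drivers. A minimal sketch for a hypothetical device ring buffer (the name and size are illustrative):

#include <linux/dma-mapping.h>

#define FOO_RING_BYTES	4096			/* illustrative size */

static void *foo_ring;
static dma_addr_t foo_ring_dma;

static int foo_alloc_ring(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	foo_ring = dma_alloc_coherent(dev, FOO_RING_BYTES,
				      &foo_ring_dma, GFP_KERNEL);
	if (!foo_ring)
		return -ENOMEM;
	return 0;
}

static void foo_free_ring(struct device *dev)
{
	dma_free_coherent(dev, FOO_RING_BYTES, foo_ring, foo_ring_dma);
}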
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c04..734ad4db3 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
@@ -33,6 +35,12 @@ extern void early_ioremap_setup(void);
*/
extern void early_ioremap_reset(void);
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index f23174fb9..1cbb8338e 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#ifndef FIXMAP_PAGE_NORMAL
#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
#ifndef FIXMAP_PAGE_NOCACHE
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#endif
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index f56094cfd..eed3bbe88 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -736,6 +736,35 @@ static inline void *phys_to_virt(unsigned long address)
}
#endif
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * If you have an IOMMU, your architecture is expected to implement both
+ * ioremap() and iounmap(); otherwise the asm-generic helpers provide a
+ * direct mapping.
+ *
+ * The ioremap_*() variants likewise default to a direct mapping when there is
+ * no IOMMU, and these defaults can be overridden. If you have an IOMMU you
+ * are strongly encouraged to provide your own ioremap_*() variant
+ * implementations, as there is currently no safe architecture-agnostic
+ * default. To avoid improper behaviour, the default asm-generic ioremap_*()
+ * variants all return NULL when an IOMMU is available. If you define your
+ * own ioremap_*() variant, you must also define that variant to itself so
+ * that the default NULL return is not used.
+ */
+
+#ifdef CONFIG_MMU
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+#else /* !CONFIG_MMU */
+
/*
* Change "struct page" to physical address.
*
@@ -743,7 +772,6 @@ static inline void *phys_to_virt(unsigned long address)
* you'll need to provide your own definitions.
*/
-#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
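Per the documentation block above, an architecture supplying its own ioremap_uc() must also define the macro to itself so the asm-generic NULL stub is not used. A sketch of the expected arch-side pattern (the path and the implementation itself are assumptions, not shown here):

/* arch/<arch>/include/asm/io.h -- sketch of the override pattern */
void __iomem *ioremap_uc(phys_addr_t offset, size_t size);	/* arch-provided */
#define ioremap_uc ioremap_uc	/* suppresses the asm-generic NULL default */

#include <asm-generic/io.h>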
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 14909b0b9..4b4b056a6 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
+
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
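A short illustration of the new conversion macros, assuming 4 KiB pages (PAGE_SHIFT == 12); the address is arbitrary:

unsigned long pfn = __phys_to_pfn(0x12345678);	/* -> 0x12345 */
phys_addr_t phys = __pfn_to_phys(pfn);		/* -> 0x12345000, page-aligned */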
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 7389c8711..b1e17fcee 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,9 +15,13 @@ struct pci_dev;
#ifdef CONFIG_PCI
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -34,12 +38,22 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
return NULL;
}
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen)
{
return NULL;
}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
#endif
#endif /* __ASM_GENERIC_IO_H */
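pci_iomap_wc() and pci_iomap_wc_range() mirror their uncached counterparts but request a write-combining mapping, which is useful for framebuffer-like BARs. A hedged sketch of a driver preferring WC and falling back to the plain mapping (the BAR number is illustrative):

#include <linux/pci.h>

static void __iomem *foo_map_fb_bar(struct pci_dev *pdev)
{
	void __iomem *fb;

	fb = pci_iomap_wc(pdev, 2, 0);		/* maxlen == 0: map the whole BAR */
	if (!fb)
		fb = pci_iomap(pdev, 2, 0);	/* fall back to uncached */

	return fb;
}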
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54bf..54a8e65e1 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
/*
* External function declarations
*/
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock - would read_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
{
return !(atomic_read(&lock->cnts) & _QW_WMASK);
}
/**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock - would write_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
{
return !atomic_read(&lock->cnts);
}
/**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
- cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return 1;
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
}
/**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
{
u32 cnts;
@@ -89,78 +89,70 @@ static inline int queue_write_trylock(struct qrwlock *lock)
if (unlikely(cnts))
return 0;
- return likely(atomic_cmpxchg(&lock->cnts,
- cnts, cnts | _QW_LOCKED) == cnts);
+ return likely(atomic_cmpxchg_acquire(&lock->cnts,
+ cnts, cnts | _QW_LOCKED) == cnts);
}
/**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
* @lock: Pointer to queue rwlock structure
*/
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
{
u32 cnts;
- cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return;
/* The slowpath will decrement the reader count, if necessary. */
- queue_read_lock_slowpath(lock);
+ queued_read_lock_slowpath(lock, cnts);
}
/**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
{
/* Optimize for the unfair lock case where the fair flag is 0. */
- if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+ if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
return;
- queue_write_lock_slowpath(lock);
+ queued_write_lock_slowpath(lock);
}
/**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
{
/*
* Atomically decrement the reader count
*/
- smp_mb__before_atomic();
- atomic_sub(_QR_BIAS, &lock->cnts);
+ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
-#ifndef queue_write_unlock
/**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
{
- /*
- * If the writer field is atomic, it can be cleared directly.
- * Otherwise, an atomic subtraction will be used to clear it.
- */
- smp_mb__before_atomic();
- atomic_sub(_QW_LOCKED, &lock->cnts);
+ smp_store_release((u8 *)&lock->cnts, 0);
}
-#endif
/*
* Remapping rwlock architecture specific functions to the corresponding
* queue rwlock functions.
*/
-#define arch_read_can_lock(l) queue_read_can_lock(l)
-#define arch_write_can_lock(l) queue_write_can_lock(l)
-#define arch_read_lock(l) queue_read_lock(l)
-#define arch_write_lock(l) queue_write_lock(l)
-#define arch_read_trylock(l) queue_read_trylock(l)
-#define arch_write_trylock(l) queue_write_trylock(l)
-#define arch_read_unlock(l) queue_read_unlock(l)
-#define arch_write_unlock(l) queue_write_unlock(l)
+#define arch_read_can_lock(l) queued_read_can_lock(l)
+#define arch_write_can_lock(l) queued_write_can_lock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
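Callers never use the queued_*() functions directly; they reach them through the normal rwlock API once an architecture selects the queued rwlock implementation (via the arch_* remapping above). A minimal reader/writer sketch under that assumption:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(foo_lock);
static int foo_value;

/* Reader side: maps to queued_read_lock()/queued_read_unlock() here. */
static int foo_read(void)
{
	int v;

	read_lock(&foo_lock);
	v = foo_value;
	read_unlock(&foo_lock);
	return v;
}

/* Writer side: maps to queued_write_lock()/queued_write_unlock(). */
static void foo_write(int v)
{
	write_lock(&foo_lock);
	foo_value = v;
	write_unlock(&foo_lock);
}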
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index fa86f240c..4e3b65583 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
@@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
+ unsigned char century = 0;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
@@ -79,6 +83,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
@@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
+ century = bcd2bin(century);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
+ if (century)
+ time->tm_year += (century - 19) * 100;
+
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
+ unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
@@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time)
yrs = 73;
}
#endif
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century) {
+ century = (yrs + 1900) / 100;
+ yrs %= 100;
+ }
+#endif
+
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
@@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
+ century = bin2bcd(century);
}
save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time)
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
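The century term added above is easiest to see with concrete numbers; a small illustration with assumed BCD register values (not taken from real hardware):

unsigned char century = bcd2bin(0x20);		/* FADT century byte -> 20 */
int years_added = (century - 19) * 100;		/* (20 - 19) * 100 = 100 */
/* i.e. a two-digit year read from CMOS is lifted from the 19xx into the 20xx range */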
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8bd374d3c..1781e54ea 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -412,12 +412,10 @@
* during second ld run in second ld pass when generating System.map */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot) \
- *(.text .text.fixup) \
+ *(.text.hot .text .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
- *(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we have the same
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 94f9ea8ab..011dde083 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -1,15 +1,10 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- * arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
struct word_at_a_time {
const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
#endif /* _ASM_WORD_AT_A_TIME_H */
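Both the big-endian and the new little-endian variant are consumed through the same four helpers. A hedged sketch of the canonical pattern (comparable to the kernel's string-length loops) for locating the first zero byte in a word-aligned buffer that is known to contain one:

#include <linux/kernel.h>
#include <asm/word-at-a-time.h>

static inline size_t first_zero_byte(const unsigned long *p)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long val, bits;
	size_t offset = 0;

	for (;;) {
		val = *p++;
		if (has_zero(val, &bits, &constants)) {
			bits = prep_zero_mask(val, bits, &constants);
			bits = create_zero_mask(bits);
			return offset + find_zero(bits);
		}
		offset += sizeof(unsigned long);
	}
}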
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 7169ad04a..077cae1e6 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -45,16 +45,40 @@
* a breach in the integrity of the message. In essence, that -EBADMSG error
* code is the key bonus an AEAD cipher has over "standard" block chaining
* modes.
+ *
+ * Memory Structure:
+ *
+ * To support the needs of the most prominent user of AEAD ciphers, namely
+ * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
+ * to.
+ *
+ * The scatter list pointing to the input data must contain:
+ *
+ * * for RFC4106 ciphers, the concatenation of
+ * associated authentication data || IV || plaintext or ciphertext. Note that
+ * the same IV (buffer) is also set with the aead_request_set_crypt call, and
+ * that aead_request_set_ad must be given the combined length of the AAD and
+ * the IV, while aead_request_set_crypt covers only the size of the input
+ * plaintext or ciphertext.
+ *
+ * * for "normal" AEAD ciphers, the concatenation of
+ * associated authentication data || plaintext or ciphertext.
+ *
+ * It is important to note that if multiple scatter-gather list entries form
+ * the input data mentioned above, the first entry must not point to a NULL
+ * buffer. If the AAD buffer could be NULL, the calling code must take
+ * precautions to ensure that the first scatter-gather list entry does not end
+ * up pointing to a NULL buffer.
*/
+struct crypto_aead;
+
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
- * @old: Boolean whether the old or new AEAD API is used
* @assoclen: Length in bytes of associated data for authentication
* @cryptlen: Length of data to be encrypted or decrypted
* @iv: Initialisation vector
- * @assoc: Associated data
* @src: Source data
* @dst: Destination data
* @__ctx: Start of private context data
@@ -62,14 +86,11 @@
struct aead_request {
struct crypto_async_request base;
- bool old;
-
unsigned int assoclen;
unsigned int cryptlen;
u8 *iv;
- struct scatterlist *assoc;
struct scatterlist *src;
struct scatterlist *dst;
@@ -77,19 +98,6 @@ struct aead_request {
};
/**
- * struct aead_givcrypt_request - AEAD request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @areq: The AEAD request itself
- */
-struct aead_givcrypt_request {
- u64 seq;
- u8 *giv;
-
- struct aead_request areq;
-};
-
-/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
@@ -141,16 +149,6 @@ struct aead_alg {
};
struct crypto_aead {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *child;
-
unsigned int authsize;
unsigned int reqsize;
@@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
-static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return tfm;
-}
-
-static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
{
- return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
- alg->ivsize;
+ return alg->ivsize;
}
/**
@@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_reqtfm(req)->encrypt(req);
+ return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
}
/**
@@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
*/
static inline int crypto_aead_decrypt(struct aead_request *req)
{
- if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
- return crypto_aead_reqtfm(req)->decrypt(req);
+ return crypto_aead_alg(aead)->decrypt(req);
}
/**
@@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
*
* Return: number of bytes
*/
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+ return tfm->reqsize;
+}
/**
* aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
- req->base.tfm = crypto_aead_tfm(tfm->child);
+ req->base.tfm = crypto_aead_tfm(tfm);
}
/**
@@ -526,23 +518,6 @@ static inline void aead_request_set_crypt(struct aead_request *req,
}
/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * Obsolete, do not use.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
- req->old = true;
-}
-
-/**
* aead_request_set_ad - set associated data information
* @req: request handle
* @assoclen: number of bytes in associated data
@@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req,
unsigned int assoclen)
{
req->assoclen = assoclen;
- req->old = false;
-}
-
-static inline struct crypto_aead *aead_givcrypt_reqtfm(
- struct aead_givcrypt_request *req)
-{
- return crypto_aead_reqtfm(&req->areq);
-}
-
-static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
-{
- return aead_givcrypt_reqtfm(req)->givencrypt(req);
-};
-
-static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
-{
- return aead_givcrypt_reqtfm(req)->givdecrypt(req);
-};
-
-static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
- struct crypto_aead *tfm)
-{
- req->areq.base.tfm = crypto_aead_tfm(tfm);
-}
-
-static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
- struct crypto_aead *tfm, gfp_t gfp)
-{
- struct aead_givcrypt_request *req;
-
- req = kmalloc(sizeof(struct aead_givcrypt_request) +
- crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_givcrypt_set_tfm(req, tfm);
-
- return req;
-}
-
-static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
-{
- kfree(req);
-}
-
-static inline void aead_givcrypt_set_callback(
- struct aead_givcrypt_request *req, u32 flags,
- crypto_completion_t compl, void *data)
-{
- aead_request_set_callback(&req->areq, flags, compl, data);
-}
-
-static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int nbytes, void *iv)
-{
- aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
-}
-
-static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- aead_request_set_assoc(&req->areq, assoc, assoclen);
-}
-
-static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
- u8 *giv, u64 seq)
-{
- req->giv = giv;
- req->seq = seq;
}
#endif /* _CRYPTO_AEAD_H */
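A minimal sketch of the single-request flow under the new API, using the "normal" AEAD layout described in the header: the scatterlist carries AAD || plaintext (with room for the tag), aead_request_set_ad() declares the AAD length and aead_request_set_crypt() the plaintext length. The "gcm(aes)" algorithm name, the synchronous-only allocation mask and the omitted error-path details are illustrative assumptions:

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_aead_encrypt(const u8 *key, unsigned int keylen,
				u8 *buf, unsigned int assoclen,
				unsigned int ptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	/* Request a synchronous implementation so no completion wait is needed. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf = AAD || plaintext, with space for the tag appended in place. */
	sg_init_one(&sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}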
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d4ebf6e9a..c9fe145f7 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,6 +18,7 @@
#include <linux/skbuff.h>
struct crypto_aead;
+struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+ void (*free)(struct crypto_instance *inst);
unsigned int type;
unsigned int maskclear;
@@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644
index 000000000..274bbaeee
--- /dev/null
+++ b/include/crypto/chacha20.h
@@ -0,0 +1,25 @@
+/*
+ * Common values for the ChaCha20 algorithm
+ */
+
+#ifndef _CRYPTO_CHACHA20_H
+#define _CRYPTO_CHACHA20_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define CHACHA20_IV_SIZE 16
+#define CHACHA20_KEY_SIZE 32
+#define CHACHA20_BLOCK_SIZE 64
+
+struct chacha20_ctx {
+ u32 key[8];
+};
+
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keysize);
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 57c8a6ee3..8e920b44c 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -63,6 +63,11 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define AHASH_REQUEST_ON_STACK(name, ahash) \
+ char __##name##_desc[sizeof(struct ahash_request) + \
+ crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = (void *)__##name##_desc
+
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 4b2547186..5554cdd8d 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
struct rtattr;
struct aead_instance {
+ void (*free)(struct aead_instance *inst);
union {
struct {
char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@ struct crypto_aead_spawn {
struct crypto_spawn base;
};
-extern const struct crypto_type crypto_aead_type;
-extern const struct crypto_type crypto_nivaead_type;
+struct aead_queue {
+ struct crypto_queue base;
+};
static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
-static inline struct crypto_instance *crypto_aead_alg_instance(
- struct crypto_aead *aead)
-{
- return crypto_tfm_alg_instance(&aead->base);
-}
-
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
{
- return aead_instance(crypto_aead_alg_instance(aead));
+ return aead_instance(crypto_tfm_alg_instance(&aead->base));
}
static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn(
crypto_set_spawn(&spawn->base, inst);
}
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
-
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask);
@@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
crypto_drop_spawn(&spawn->base);
}
-static inline struct crypto_alg *crypto_aead_spawn_alg(
- struct crypto_aead_spawn *spawn)
-{
- return spawn->base.alg;
-}
-
static inline struct aead_alg *crypto_spawn_aead_alg(
struct crypto_aead_spawn *spawn)
{
@@ -118,43 +106,51 @@ static inline struct crypto_aead *crypto_spawn_aead(
return crypto_spawn_tfm2(&spawn->base);
}
-struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
-int aead_geniv_init(struct crypto_tfm *tfm);
-void aead_geniv_exit(struct crypto_tfm *tfm);
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ aead->reqsize = reqsize;
+}
-static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
- return geniv->child;
+ return alg->maxauthsize;
}
-static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
{
- return aead_request_ctx(&req->areq);
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
-static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
- int err)
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
{
- aead_request_complete(&req->areq, err);
+ crypto_init_queue(&queue->base, max_qlen);
}
-static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
- unsigned int reqsize)
+static inline int aead_enqueue_request(struct aead_queue *queue,
+ struct aead_request *request)
{
- crypto_aead_crt(aead)->reqsize = reqsize;
+ return crypto_enqueue_request(&queue->base, &request->base);
}
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+static inline struct aead_request *aead_dequeue_request(
+ struct aead_queue *queue)
{
- return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
- alg->maxauthsize;
+ struct crypto_async_request *req;
+
+ req = crypto_dequeue_request(&queue->base);
+
+ return req ? container_of(req, struct aead_request, base) : NULL;
}
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
{
- return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+ struct crypto_async_request *req;
+
+ req = crypto_get_backlog(&queue->base);
+
+ return req ? container_of(req, struct aead_request, base) : NULL;
}
int crypto_register_aead(struct aead_alg *alg);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 9ca9b871a..59333635e 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,10 +15,19 @@
#include <crypto/internal/aead.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
+ struct crypto_blkcipher *null;
+ u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index b3a46c515..2cf7a61ec 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
return req->base.flags;
}
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+ return req->base.flags;
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 691c79172..441aff9b5 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -9,6 +9,11 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#ifndef _CRYPTO_PKCS7_H
+#define _CRYPTO_PKCS7_H
+
+#include <crypto/public_key.h>
+
struct key;
struct pkcs7_message;
@@ -33,4 +38,10 @@ extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
/*
* pkcs7_verify.c
*/
-extern int pkcs7_verify(struct pkcs7_message *pkcs7);
+extern int pkcs7_verify(struct pkcs7_message *pkcs7,
+ enum key_being_used_for usage);
+
+extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+ const void *data, size_t datalen);
+
+#endif /* _CRYPTO_PKCS7_H */
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644
index 000000000..894df59b7
--- /dev/null
+++ b/include/crypto/poly1305.h
@@ -0,0 +1,41 @@
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_POLY1305_H
+#define _CRYPTO_POLY1305_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLY1305_BLOCK_SIZE 16
+#define POLY1305_KEY_SIZE 32
+#define POLY1305_DIGEST_SIZE 16
+
+struct poly1305_desc_ctx {
+ /* key */
+ u32 r[5];
+ /* finalize key */
+ u32 s[4];
+ /* accumulator */
+ u32 h[5];
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* r key has been set */
+ bool rset;
+ /* s key has been set */
+ bool sset;
+};
+
+int crypto_poly1305_init(struct shash_desc *desc);
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen);
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen);
+int crypto_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen);
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 54add2069..067c242b1 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -33,12 +33,27 @@ extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
enum pkey_id_type {
PKEY_ID_PGP, /* OpenPGP generated key ID */
PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
+ PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
PKEY_ID_TYPE__LAST
};
extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
/*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+ VERIFYING_MODULE_SIGNATURE,
+ VERIFYING_FIRMWARE_SIGNATURE,
+ VERIFYING_KEXEC_PE_SIGNATURE,
+ VERIFYING_KEY_SIGNATURE,
+ VERIFYING_KEY_SELF_SIGNATURE,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
* Note that this may include private part of the key as well as the public
@@ -101,7 +116,8 @@ extern int verify_signature(const struct key *key,
struct asymmetric_key_id;
extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const struct asymmetric_key_id *kid,
+ const struct asymmetric_key_id *id,
+ const struct asymmetric_key_id *skid,
bool partial);
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7e7..35f99b68d 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
#include <linux/scatterlist.h>
#include <linux/sched.h>
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
- struct scatterlist *sg2)
-{
- sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
- sg1[num - 1].page_link &= ~0x02;
- sg1[num - 1].page_link |= 0x01;
-}
-
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg,
int chain, int num)
@@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
}
if (sg)
- scatterwalk_sg_chain(head, num, sg);
+ sg_chain(head, num, sg);
else
sg_mark_end(head);
}
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 07d245f07..d8dd41fb0 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -1,7 +1,7 @@
/*
* Symmetric key ciphers.
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -18,6 +18,28 @@
#include <linux/slab.h>
/**
+ * struct skcipher_request - Symmetric key cipher request
+ * @cryptlen: Number of bytes to encrypt or decrypt
+ * @iv: Initialisation Vector
+ * @src: Source SG list
+ * @dst: Destination SG list
+ * @base: Underlying async request
+ * @__ctx: Start of private context data
+ */
+struct skcipher_request {
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ struct crypto_async_request base;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
* struct skcipher_givcrypt_request - Crypto request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
struct ablkcipher_request creq;
};
+struct crypto_skcipher {
+ int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct skcipher_request *req);
+ int (*decrypt)(struct skcipher_request *req);
+
+ unsigned int ivsize;
+ unsigned int reqsize;
+
+ struct crypto_tfm base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_desc[sizeof(struct skcipher_request) + \
+ crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ struct skcipher_request *name = (void *)__##name##_desc
+
static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
struct skcipher_givcrypt_request *req)
{
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
req->seq = seq;
}
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation has completed.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, additional information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure, in
+ * addition to the IV used for the cipher request. Maintaining such state
+ * information is important for crypto driver implementers, because the
+ * callback function invoked upon completion of a cipher operation may need to
+ * know which operation just finished if multiple operations were invoked in
+ * parallel. This
+ * state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+ struct crypto_skcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ * otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+ crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+ return tfm->ivsize;
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note that the key length determines the cipher variant. Many block ciphers
+ * come in different variants depending on the key size, such as AES-128,
+ * AES-192 and AES-256. When a 16-byte key is provided for an AES cipher
+ * handle, AES-128 is used.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return tfm->setkey(tfm, key, keylen);
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle when furnishing an skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the skcipher_request_* API calls in a similar way as the
+ * skcipher handle does for the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+ return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+ struct crypto_skcipher *tfm)
+{
+ req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL in case of
+ * an error.
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+ struct crypto_skcipher *tfm, gfp_t gfp)
+{
+ struct skcipher_request *req;
+
+ req = kmalloc(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tfm), gfp);
+
+ if (likely(req))
+ skcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_skcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+ struct skcipher_request *req,
+ struct scatterlist *src, struct scatterlist *dst,
+ unsigned int cryptlen, void *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
#endif /* _CRYPTO_SKCIPHER_H */
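A minimal sketch tying the pieces above together: allocate a cipher handle, set a key, build an on-stack request and encrypt. The "cbc(aes)" name and the synchronous-only allocation mask are illustrative assumptions; a caller using an asynchronous tfm would instead wait for the completion callback:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_skcipher_encrypt(const u8 *key, unsigned int keylen,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_skcipher_encrypt(req);
	}

	crypto_free_skcipher(tfm);
	return err;
}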
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc35..bae79f3c4 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
#include <drm/drmP.h>
+struct dw_hdmi;
+
enum {
DW_HDMI_RES_8,
DW_HDMI_RES_10,
@@ -59,4 +61,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
void *data, struct drm_encoder *encoder,
struct resource *iores, int irq,
const struct dw_hdmi_plat_data *plat_data);
+
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa519711..8b5ce7c5d 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@ void drm_err(const char *format, ...);
/*@{*/
/* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP 0x1
-#define DRIVER_PCI_DMA 0x8
-#define DRIVER_SG 0x10
-#define DRIVER_HAVE_DMA 0x20
-#define DRIVER_HAVE_IRQ 0x40
-#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_GEM 0x1000
-#define DRIVER_MODESET 0x2000
-#define DRIVER_PRIME 0x4000
-#define DRIVER_RENDER 0x8000
-#define DRIVER_ATOMIC 0x10000
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
/***********************************************************************/
/** \name Macros to make printk easier */
@@ -675,13 +676,12 @@ struct drm_minor {
/* currently active master for this node. Protected by master_mutex */
struct drm_master *master;
- struct drm_mode_group mode_group;
};
struct drm_pending_vblank_event {
struct drm_pending_event base;
- int pipe;
+ unsigned int pipe;
struct drm_event_vblank event;
};
@@ -700,7 +700,7 @@ struct drm_vblank_crtc {
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
- int crtc; /* crtc index */
+ unsigned int pipe; /* crtc index */
bool enabled; /* so we don't call enable more than
once per disable */
};
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
/*@{*/
/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- int crtc, int *max_error,
+ unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
}
/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a91332..e67aeac2a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
- return state->mode_changed || state->active_changed;
+ return state->mode_changed || state->active_changed ||
+ state->connectors_changed;
}
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8a1..11266d147 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4a2..faaeff7db 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@ struct drm_atomic_state;
* @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
* @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@ struct drm_crtc_state {
bool planes_changed : 1;
bool mode_changed : 1;
bool active_changed : 1;
+ bool connectors_changed : 1;
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@ struct drm_connector_state {
* etc.
*/
struct drm_connector_funcs {
- void (*dpms)(struct drm_connector *connector, int mode);
+ int (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@ struct drm_plane {
uint32_t possible_crtcs;
uint32_t *format_types;
- uint32_t format_count;
+ unsigned int format_count;
bool format_default;
struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@ struct drm_mode_config_funcs {
};
/**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state. But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
- uint32_t num_crtcs;
- uint32_t num_encoders;
- uint32_t num_connectors;
-
- /* list of object IDs for this group */
- uint32_t *id_list;
-};
-
-/**
* struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
* @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@ extern int drm_universal_plane_init(struct drm_device *dev,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
- uint32_t format_count,
+ unsigned int format_count,
enum drm_plane_type type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@ extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
}
/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
- list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+#define drm_for_each_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+ /*
+ * The connector hotadd/remove code currently grabs both locks when
+ * updating lists. Hence readers need only hold either of them to be
+ * safe and the check amounts to
+ *
+ * WARN_ON(not_holding(A) && not_holding(B)).
+ */
+ WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+ !drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+ for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
+ connector = list_first_entry(&(dev)->mode_config.connector_list, \
+ struct drm_connector, head); \
+ &connector->head != (&(dev)->mode_config.connector_list); \
+ connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+ list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
+ fb = list_first_entry(&(dev)->mode_config.fb_list, \
+ struct drm_framebuffer, head); \
+ &fb->head != (&(dev)->mode_config.fb_list); \
+ fb = list_next_entry(fb, head))
+
#endif /* __DRM_CRTC_H__ */
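
The new iterator macros rely on the locking documented in assert_drm_connector_list_read_locked(). A short reader sketch, not part of this diff and with a hypothetical function name, might look like:

/* Sketch only: count connected connectors under the connector-list lock. */
static unsigned int example_count_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	unsigned int n = 0;

	/* Holding either mode_config.mutex or connection_mutex satisfies the WARN_ON. */
	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		if (connector->status == connector_status_connected)
			n++;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}
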
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68b5..3febb4b9f 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@ struct drm_crtc_helper_funcs {
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
- void (*atomic_begin)(struct drm_crtc *crtc);
- void (*atomic_flush)(struct drm_crtc *crtc);
+ void (*atomic_begin)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
+ void (*atomic_flush)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
};
/**
@@ -190,7 +192,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
@@ -238,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f642f..0212d139a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
#define DP_TEST_SINK_MISC 0x246
# define DP_TEST_CRC_SUPPORTED (1 << 5)
-# define DP_TEST_COUNT_MASK 0x7
+# define DP_TEST_COUNT_MASK 0xf
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
@@ -568,6 +568,10 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
@@ -578,6 +582,7 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index a89f505c8..534009974 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -375,6 +375,7 @@ struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+ void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def..dbab4622b 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@ struct drm_fb_helper {
bool delayed_hotplug;
};
+#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect);
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area);
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image);
+
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect);
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area);
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image);
+
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
+#else
+static inline void drm_fb_helper_prepare(struct drm_device *dev,
+ struct drm_fb_helper *helper,
+ const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn)
+{
+ return 0;
+}
+
+static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
+{
+}
+
+static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_set_par(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline bool
+drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return true;
+}
+
+static inline struct fb_info *
+drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+ return NULL;
+}
+
+static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_fill_var(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
+{
+}
+
+static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
+{
+}
+
+static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+ int state)
+{
+}
+
+static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
+ int bpp_sel)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ return 0;
+}
+
+static inline struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+ int width, int height)
+{
+ return NULL;
+}
+
+static inline struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ return NULL;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+#endif
#endif
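
Because the !CONFIG_DRM_FBDEV_EMULATION branch above provides no-op stubs, callers no longer need their own #ifdefs. A sketch of an unconditional teardown path (not part of this diff, hypothetical function name):

/* Sketch only: these calls compile to no-ops when fbdev emulation is disabled. */
static void example_fbdev_fini(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
	drm_fb_helper_fini(helper);
}
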
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff56..5dd18bfdf 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@ struct drm_crtc;
struct drm_plane;
void drm_modeset_lock_all(struct drm_device *dev);
-int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e16283a..dda401bf9 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
* planes.
*/
-extern int drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
/**
* drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
plane->helper_private = funcs;
}
-extern int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- const struct drm_rect *clip,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-extern int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-extern int drm_primary_helper_disable(struct drm_plane *plane);
-extern void drm_primary_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible);
+int drm_primary_helper_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_primary_helper_disable(struct drm_plane *plane);
+void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64aa..b2d56dd48 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@ struct i915_audio_component {
void (*codec_wake_override)(struct device *, bool enable);
int (*get_cdclk_freq)(struct device *);
} *ops;
+
+ const struct i915_audio_component_audio_ops {
+ void *audio_ptr;
+ /**
+ * Called from the i915 driver to notify the HDA driver that
+ * pin sense and/or ELD information has changed.
+ * @audio_ptr: HDA driver object
+ * @port: Which port has changed (PORTA / PORTB / PORTC etc)
+ */
+ void (*pin_eld_notify)(void *audio_ptr, int port);
+ } *audio_ops;
};
#endif /* _I915_COMPONENT_H_ */
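
The new audio_ops hook is intended to be invoked from the display side when a port's pin sense or ELD changes. A guarded call, sketched here for illustration only (hypothetical function name):

/* Sketch only: forward an ELD/pin-sense change to the bound HDA driver. */
static void example_notify_audio(struct i915_audio_component *acomp, int port)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, port);
}
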
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdade6..9e9bddaa5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
- phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, u64 *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index aab088d30..63d01c15d 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -31,6 +31,7 @@
#define CLK_FOUT_VPLL 4
#define CLK_FOUT_UPLL 5
#define CLK_FOUT_MPLL 6
+#define CLK_ARM_CLK 7
/* Muxes */
#define CLK_MOUT_MPLL_USER_L 16
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 4273891dc..8183d1c23 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -21,6 +21,7 @@
#define CLK_FOUT_CPLL 6
#define CLK_FOUT_EPLL 7
#define CLK_FOUT_VPLL 8
+#define CLK_ARM_CLK 9
/* gate for special clocks (sclk) */
#define CLK_SCLK_CAM_BAYER 128
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 878086845..8de173ff1 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -251,6 +251,9 @@
#define IMX6QDL_CLK_VIDEO_27M 238
#define IMX6QDL_CLK_MIPI_CORE_CFG 239
#define IMX6QDL_CLK_MIPI_IPG 240
-#define IMX6QDL_CLK_END 241
+#define IMX6QDL_CLK_CAAM_MEM 241
+#define IMX6QDL_CLK_CAAM_ACLK 242
+#define IMX6QDL_CLK_CAAM_IPG 243
+#define IMX6QDL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
new file mode 100644
index 000000000..c343894ce
--- /dev/null
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX6UL_H
+#define __DT_BINDINGS_CLOCK_IMX6UL_H
+
+#define IMX6UL_CLK_DUMMY 0
+#define IMX6UL_CLK_CKIL 1
+#define IMX6UL_CLK_CKIH 2
+#define IMX6UL_CLK_OSC 3
+#define IMX6UL_PLL1_BYPASS_SRC 4
+#define IMX6UL_PLL2_BYPASS_SRC 5
+#define IMX6UL_PLL3_BYPASS_SRC 6
+#define IMX6UL_PLL4_BYPASS_SRC 7
+#define IMX6UL_PLL5_BYPASS_SRC 8
+#define IMX6UL_PLL6_BYPASS_SRC 9
+#define IMX6UL_PLL7_BYPASS_SRC 10
+#define IMX6UL_CLK_PLL1 11
+#define IMX6UL_CLK_PLL2 12
+#define IMX6UL_CLK_PLL3 13
+#define IMX6UL_CLK_PLL4 14
+#define IMX6UL_CLK_PLL5 15
+#define IMX6UL_CLK_PLL6 16
+#define IMX6UL_CLK_PLL7 17
+#define IMX6UL_PLL1_BYPASS 18
+#define IMX6UL_PLL2_BYPASS 19
+#define IMX6UL_PLL3_BYPASS 20
+#define IMX6UL_PLL4_BYPASS 21
+#define IMX6UL_PLL5_BYPASS 22
+#define IMX6UL_PLL6_BYPASS 23
+#define IMX6UL_PLL7_BYPASS 24
+#define IMX6UL_CLK_PLL1_SYS 25
+#define IMX6UL_CLK_PLL2_BUS 26
+#define IMX6UL_CLK_PLL3_USB_OTG 27
+#define IMX6UL_CLK_PLL4_AUDIO 28
+#define IMX6UL_CLK_PLL5_VIDEO 29
+#define IMX6UL_CLK_PLL6_ENET 30
+#define IMX6UL_CLK_PLL7_USB_HOST 31
+#define IMX6UL_CLK_USBPHY1 32
+#define IMX6UL_CLK_USBPHY2 33
+#define IMX6UL_CLK_USBPHY1_GATE 34
+#define IMX6UL_CLK_USBPHY2_GATE 35
+#define IMX6UL_CLK_PLL2_PFD0 36
+#define IMX6UL_CLK_PLL2_PFD1 37
+#define IMX6UL_CLK_PLL2_PFD2 38
+#define IMX6UL_CLK_PLL2_PFD3 39
+#define IMX6UL_CLK_PLL3_PFD0 40
+#define IMX6UL_CLK_PLL3_PFD1 41
+#define IMX6UL_CLK_PLL3_PFD2 42
+#define IMX6UL_CLK_PLL3_PFD3 43
+#define IMX6UL_CLK_ENET_REF 44
+#define IMX6UL_CLK_ENET2_REF 45
+#define IMX6UL_CLK_ENET2_REF_125M 46
+#define IMX6UL_CLK_ENET_PTP_REF 47
+#define IMX6UL_CLK_ENET_PTP 48
+#define IMX6UL_CLK_PLL4_POST_DIV 49
+#define IMX6UL_CLK_PLL4_AUDIO_DIV 50
+#define IMX6UL_CLK_PLL5_POST_DIV 51
+#define IMX6UL_CLK_PLL5_VIDEO_DIV 52
+#define IMX6UL_CLK_PLL2_198M 53
+#define IMX6UL_CLK_PLL3_80M 54
+#define IMX6UL_CLK_PLL3_60M 55
+#define IMX6UL_CLK_STEP 56
+#define IMX6UL_CLK_PLL1_SW 57
+#define IMX6UL_CLK_AXI_ALT_SEL 58
+#define IMX6UL_CLK_AXI_SEL 59
+#define IMX6UL_CLK_PERIPH_PRE 60
+#define IMX6UL_CLK_PERIPH2_PRE 61
+#define IMX6UL_CLK_PERIPH_CLK2_SEL 62
+#define IMX6UL_CLK_PERIPH2_CLK2_SEL 63
+#define IMX6UL_CLK_USDHC1_SEL 64
+#define IMX6UL_CLK_USDHC2_SEL 65
+#define IMX6UL_CLK_BCH_SEL 66
+#define IMX6UL_CLK_GPMI_SEL 67
+#define IMX6UL_CLK_EIM_SLOW_SEL 68
+#define IMX6UL_CLK_SPDIF_SEL 69
+#define IMX6UL_CLK_SAI1_SEL 70
+#define IMX6UL_CLK_SAI2_SEL 71
+#define IMX6UL_CLK_SAI3_SEL 72
+#define IMX6UL_CLK_LCDIF_PRE_SEL 73
+#define IMX6UL_CLK_SIM_PRE_SEL 74
+#define IMX6UL_CLK_LDB_DI0_SEL 75
+#define IMX6UL_CLK_LDB_DI1_SEL 76
+#define IMX6UL_CLK_ENFC_SEL 77
+#define IMX6UL_CLK_CAN_SEL 78
+#define IMX6UL_CLK_ECSPI_SEL 79
+#define IMX6UL_CLK_UART_SEL 80
+#define IMX6UL_CLK_QSPI1_SEL 81
+#define IMX6UL_CLK_PERCLK_SEL 82
+#define IMX6UL_CLK_LCDIF_SEL 83
+#define IMX6UL_CLK_SIM_SEL 84
+#define IMX6UL_CLK_PERIPH 85
+#define IMX6UL_CLK_PERIPH2 86
+#define IMX6UL_CLK_LDB_DI0_DIV_3_5 87
+#define IMX6UL_CLK_LDB_DI0_DIV_7 88
+#define IMX6UL_CLK_LDB_DI1_DIV_3_5 89
+#define IMX6UL_CLK_LDB_DI1_DIV_7 90
+#define IMX6UL_CLK_LDB_DI0_DIV_SEL 91
+#define IMX6UL_CLK_LDB_DI1_DIV_SEL 92
+#define IMX6UL_CLK_ARM 93
+#define IMX6UL_CLK_PERIPH_CLK2 94
+#define IMX6UL_CLK_PERIPH2_CLK2 95
+#define IMX6UL_CLK_AHB 96
+#define IMX6UL_CLK_MMDC_PODF 97
+#define IMX6UL_CLK_AXI_PODF 98
+#define IMX6UL_CLK_PERCLK 99
+#define IMX6UL_CLK_IPG 100
+#define IMX6UL_CLK_USDHC1_PODF 101
+#define IMX6UL_CLK_USDHC2_PODF 102
+#define IMX6UL_CLK_BCH_PODF 103
+#define IMX6UL_CLK_GPMI_PODF 104
+#define IMX6UL_CLK_EIM_SLOW_PODF 105
+#define IMX6UL_CLK_SPDIF_PRED 106
+#define IMX6UL_CLK_SPDIF_PODF 107
+#define IMX6UL_CLK_SAI1_PRED 108
+#define IMX6UL_CLK_SAI1_PODF 109
+#define IMX6UL_CLK_SAI2_PRED 110
+#define IMX6UL_CLK_SAI2_PODF 111
+#define IMX6UL_CLK_SAI3_PRED 112
+#define IMX6UL_CLK_SAI3_PODF 113
+#define IMX6UL_CLK_LCDIF_PRED 114
+#define IMX6UL_CLK_LCDIF_PODF 115
+#define IMX6UL_CLK_SIM_PODF 116
+#define IMX6UL_CLK_QSPI1_PDOF 117
+#define IMX6UL_CLK_ENFC_PRED 118
+#define IMX6UL_CLK_ENFC_PODF 119
+#define IMX6UL_CLK_CAN_PODF 120
+#define IMX6UL_CLK_ECSPI_PODF 121
+#define IMX6UL_CLK_UART_PODF 122
+#define IMX6UL_CLK_ADC1 123
+#define IMX6UL_CLK_ADC2 124
+#define IMX6UL_CLK_AIPSTZ1 125
+#define IMX6UL_CLK_AIPSTZ2 126
+#define IMX6UL_CLK_AIPSTZ3 127
+#define IMX6UL_CLK_APBHDMA 128
+#define IMX6UL_CLK_ASRC_IPG 129
+#define IMX6UL_CLK_ASRC_MEM 130
+#define IMX6UL_CLK_GPMI_BCH_APB 131
+#define IMX6UL_CLK_GPMI_BCH 132
+#define IMX6UL_CLK_GPMI_IO 133
+#define IMX6UL_CLK_GPMI_APB 134
+#define IMX6UL_CLK_CAAM_MEM 135
+#define IMX6UL_CLK_CAAM_ACLK 136
+#define IMX6UL_CLK_CAAM_IPG 137
+#define IMX6UL_CLK_CSI 138
+#define IMX6UL_CLK_ECSPI1 139
+#define IMX6UL_CLK_ECSPI2 140
+#define IMX6UL_CLK_ECSPI3 141
+#define IMX6UL_CLK_ECSPI4 142
+#define IMX6UL_CLK_EIM 143
+#define IMX6UL_CLK_ENET 144
+#define IMX6UL_CLK_ENET_AHB 145
+#define IMX6UL_CLK_EPIT1 146
+#define IMX6UL_CLK_EPIT2 147
+#define IMX6UL_CLK_CAN1_IPG 148
+#define IMX6UL_CLK_CAN1_SERIAL 149
+#define IMX6UL_CLK_CAN2_IPG 150
+#define IMX6UL_CLK_CAN2_SERIAL 151
+#define IMX6UL_CLK_GPT1_BUS 152
+#define IMX6UL_CLK_GPT1_SERIAL 153
+#define IMX6UL_CLK_GPT2_BUS 154
+#define IMX6UL_CLK_GPT2_SERIAL 155
+#define IMX6UL_CLK_I2C1 156
+#define IMX6UL_CLK_I2C2 157
+#define IMX6UL_CLK_I2C3 158
+#define IMX6UL_CLK_I2C4 159
+#define IMX6UL_CLK_IOMUXC 160
+#define IMX6UL_CLK_LCDIF_APB 161
+#define IMX6UL_CLK_LCDIF_PIX 162
+#define IMX6UL_CLK_MMDC_P0_FAST 163
+#define IMX6UL_CLK_MMDC_P0_IPG 164
+#define IMX6UL_CLK_OCOTP 165
+#define IMX6UL_CLK_OCRAM 166
+#define IMX6UL_CLK_PWM1 167
+#define IMX6UL_CLK_PWM2 168
+#define IMX6UL_CLK_PWM3 169
+#define IMX6UL_CLK_PWM4 170
+#define IMX6UL_CLK_PWM5 171
+#define IMX6UL_CLK_PWM6 172
+#define IMX6UL_CLK_PWM7 173
+#define IMX6UL_CLK_PWM8 174
+#define IMX6UL_CLK_PXP 175
+#define IMX6UL_CLK_QSPI 176
+#define IMX6UL_CLK_ROM 177
+#define IMX6UL_CLK_SAI1 178
+#define IMX6UL_CLK_SAI1_IPG 179
+#define IMX6UL_CLK_SAI2 180
+#define IMX6UL_CLK_SAI2_IPG 181
+#define IMX6UL_CLK_SAI3 182
+#define IMX6UL_CLK_SAI3_IPG 183
+#define IMX6UL_CLK_SDMA 184
+#define IMX6UL_CLK_SIM 185
+#define IMX6UL_CLK_SIM_S 186
+#define IMX6UL_CLK_SPBA 187
+#define IMX6UL_CLK_SPDIF 188
+#define IMX6UL_CLK_UART1_IPG 189
+#define IMX6UL_CLK_UART1_SERIAL 190
+#define IMX6UL_CLK_UART2_IPG 191
+#define IMX6UL_CLK_UART2_SERIAL 192
+#define IMX6UL_CLK_UART3_IPG 193
+#define IMX6UL_CLK_UART3_SERIAL 194
+#define IMX6UL_CLK_UART4_IPG 195
+#define IMX6UL_CLK_UART4_SERIAL 196
+#define IMX6UL_CLK_UART5_IPG 197
+#define IMX6UL_CLK_UART5_SERIAL 198
+#define IMX6UL_CLK_UART6_IPG 199
+#define IMX6UL_CLK_UART6_SERIAL 200
+#define IMX6UL_CLK_UART7_IPG 201
+#define IMX6UL_CLK_UART7_SERIAL 202
+#define IMX6UL_CLK_UART8_IPG 203
+#define IMX6UL_CLK_UART8_SERIAL 204
+#define IMX6UL_CLK_USBOH3 205
+#define IMX6UL_CLK_USDHC1 206
+#define IMX6UL_CLK_USDHC2 207
+#define IMX6UL_CLK_WDOG1 208
+#define IMX6UL_CLK_WDOG2 209
+#define IMX6UL_CLK_WDOG3 210
+#define IMX6UL_CLK_LDB_DI0 211
+#define IMX6UL_CLK_AXI 212
+#define IMX6UL_CLK_SPDIF_GCLK 213
+#define IMX6UL_CLK_GPT_3M 214
+#define IMX6UL_CLK_SIM2 215
+#define IMX6UL_CLK_SIM1 216
+#define IMX6UL_CLK_IPP_DI0 217
+#define IMX6UL_CLK_IPP_DI1 218
+#define IMX6UL_CA7_SECONDARY_SEL 219
+#define IMX6UL_CLK_PER_BCH 220
+#define IMX6UL_CLK_CSI_SEL 221
+#define IMX6UL_CLK_CSI_PODF 222
+#define IMX6UL_CLK_PLL3_120M 223
+
+#define IMX6UL_CLK_END 224
+
+#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index ff7ca3584..7b1ad8922 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -108,6 +108,7 @@
#define R8A7790_CLK_VIN2 9
#define R8A7790_CLK_VIN1 10
#define R8A7790_CLK_VIN0 11
+#define R8A7790_CLK_ETHERAVB 12
#define R8A7790_CLK_ETHER 13
#define R8A7790_CLK_SATA1 14
#define R8A7790_CLK_SATA0 15
@@ -143,6 +144,8 @@
#define R8A7790_CLK_SCU_ALL 17
#define R8A7790_CLK_SCU_DVC1 18
#define R8A7790_CLK_SCU_DVC0 19
+#define R8A7790_CLK_SCU_CTU1_MIX1 20
+#define R8A7790_CLK_SCU_CTU0_MIX0 21
#define R8A7790_CLK_SCU_SRC9 22
#define R8A7790_CLK_SCU_SRC8 23
#define R8A7790_CLK_SCU_SRC7 24
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index 402268384..dd09b73c4 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -141,6 +141,8 @@
#define R8A7791_CLK_SCU_ALL 17
#define R8A7791_CLK_SCU_DVC1 18
#define R8A7791_CLK_SCU_DVC0 19
+#define R8A7791_CLK_SCU_CTU1_MIX1 20
+#define R8A7791_CLK_SCU_CTU0_MIX0 21
#define R8A7791_CLK_SCU_SRC9 22
#define R8A7791_CLK_SCU_SRC8 23
#define R8A7791_CLK_SCU_SRC7 24
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
new file mode 100644
index 000000000..1579e07f9
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -0,0 +1,164 @@
+/*
+ * r8a7793 clock definition
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
+#define __DT_BINDINGS_CLOCK_R8A7793_H__
+
+/* CPG */
+#define R8A7793_CLK_MAIN 0
+#define R8A7793_CLK_PLL0 1
+#define R8A7793_CLK_PLL1 2
+#define R8A7793_CLK_PLL3 3
+#define R8A7793_CLK_LB 4
+#define R8A7793_CLK_QSPI 5
+#define R8A7793_CLK_SDH 6
+#define R8A7793_CLK_SD0 7
+#define R8A7793_CLK_Z 8
+#define R8A7793_CLK_RCAN 9
+#define R8A7793_CLK_ADSP 10
+
+/* MSTP0 */
+#define R8A7793_CLK_MSIOF0 0
+
+/* MSTP1 */
+#define R8A7793_CLK_VCP0 1
+#define R8A7793_CLK_VPC0 3
+#define R8A7793_CLK_SSP1 9
+#define R8A7793_CLK_TMU1 11
+#define R8A7793_CLK_3DG 12
+#define R8A7793_CLK_2DDMAC 15
+#define R8A7793_CLK_FDP1_1 18
+#define R8A7793_CLK_FDP1_0 19
+#define R8A7793_CLK_TMU3 21
+#define R8A7793_CLK_TMU2 22
+#define R8A7793_CLK_CMT0 24
+#define R8A7793_CLK_TMU0 25
+#define R8A7793_CLK_VSP1_DU1 27
+#define R8A7793_CLK_VSP1_DU0 28
+#define R8A7793_CLK_VSP1_S 31
+
+/* MSTP2 */
+#define R8A7793_CLK_SCIFA2 2
+#define R8A7793_CLK_SCIFA1 3
+#define R8A7793_CLK_SCIFA0 4
+#define R8A7793_CLK_MSIOF2 5
+#define R8A7793_CLK_SCIFB0 6
+#define R8A7793_CLK_SCIFB1 7
+#define R8A7793_CLK_MSIOF1 8
+#define R8A7793_CLK_SCIFB2 16
+#define R8A7793_CLK_SYS_DMAC1 18
+#define R8A7793_CLK_SYS_DMAC0 19
+
+/* MSTP3 */
+#define R8A7793_CLK_TPU0 4
+#define R8A7793_CLK_SDHI2 11
+#define R8A7793_CLK_SDHI1 12
+#define R8A7793_CLK_SDHI0 14
+#define R8A7793_CLK_MMCIF0 15
+#define R8A7793_CLK_IIC0 18
+#define R8A7793_CLK_PCIEC 19
+#define R8A7793_CLK_IIC1 23
+#define R8A7793_CLK_SSUSB 28
+#define R8A7793_CLK_CMT1 29
+#define R8A7793_CLK_USBDMAC0 30
+#define R8A7793_CLK_USBDMAC1 31
+
+/* MSTP4 */
+#define R8A7793_CLK_IRQC 7
+
+/* MSTP5 */
+#define R8A7793_CLK_AUDIO_DMAC1 1
+#define R8A7793_CLK_AUDIO_DMAC0 2
+#define R8A7793_CLK_ADSP_MOD 6
+#define R8A7793_CLK_THERMAL 22
+#define R8A7793_CLK_PWM 23
+
+/* MSTP7 */
+#define R8A7793_CLK_EHCI 3
+#define R8A7793_CLK_HSUSB 4
+#define R8A7793_CLK_HSCIF2 13
+#define R8A7793_CLK_SCIF5 14
+#define R8A7793_CLK_SCIF4 15
+#define R8A7793_CLK_HSCIF1 16
+#define R8A7793_CLK_HSCIF0 17
+#define R8A7793_CLK_SCIF3 18
+#define R8A7793_CLK_SCIF2 19
+#define R8A7793_CLK_SCIF1 20
+#define R8A7793_CLK_SCIF0 21
+#define R8A7793_CLK_DU1 23
+#define R8A7793_CLK_DU0 24
+#define R8A7793_CLK_LVDS0 26
+
+/* MSTP8 */
+#define R8A7793_CLK_IPMMU_SGX 0
+#define R8A7793_CLK_VIN2 9
+#define R8A7793_CLK_VIN1 10
+#define R8A7793_CLK_VIN0 11
+#define R8A7793_CLK_ETHER 13
+#define R8A7793_CLK_SATA1 14
+#define R8A7793_CLK_SATA0 15
+
+/* MSTP9 */
+#define R8A7793_CLK_GPIO7 4
+#define R8A7793_CLK_GPIO6 5
+#define R8A7793_CLK_GPIO5 7
+#define R8A7793_CLK_GPIO4 8
+#define R8A7793_CLK_GPIO3 9
+#define R8A7793_CLK_GPIO2 10
+#define R8A7793_CLK_GPIO1 11
+#define R8A7793_CLK_GPIO0 12
+#define R8A7793_CLK_RCAN1 15
+#define R8A7793_CLK_RCAN0 16
+#define R8A7793_CLK_QSPI_MOD 17
+#define R8A7793_CLK_I2C5 25
+#define R8A7793_CLK_IICDVFS 26
+#define R8A7793_CLK_I2C4 27
+#define R8A7793_CLK_I2C3 28
+#define R8A7793_CLK_I2C2 29
+#define R8A7793_CLK_I2C1 30
+#define R8A7793_CLK_I2C0 31
+
+/* MSTP10 */
+#define R8A7793_CLK_SSI_ALL 5
+#define R8A7793_CLK_SSI9 6
+#define R8A7793_CLK_SSI8 7
+#define R8A7793_CLK_SSI7 8
+#define R8A7793_CLK_SSI6 9
+#define R8A7793_CLK_SSI5 10
+#define R8A7793_CLK_SSI4 11
+#define R8A7793_CLK_SSI3 12
+#define R8A7793_CLK_SSI2 13
+#define R8A7793_CLK_SSI1 14
+#define R8A7793_CLK_SSI0 15
+#define R8A7793_CLK_SCU_ALL 17
+#define R8A7793_CLK_SCU_DVC1 18
+#define R8A7793_CLK_SCU_DVC0 19
+#define R8A7793_CLK_SCU_SRC9 22
+#define R8A7793_CLK_SCU_SRC8 23
+#define R8A7793_CLK_SCU_SRC7 24
+#define R8A7793_CLK_SCU_SRC6 25
+#define R8A7793_CLK_SCU_SRC5 26
+#define R8A7793_CLK_SCU_SRC4 27
+#define R8A7793_CLK_SCU_SRC3 28
+#define R8A7793_CLK_SCU_SRC2 29
+#define R8A7793_CLK_SCU_SRC1 30
+#define R8A7793_CLK_SCU_SRC0 31
+
+/* MSTP11 */
+#define R8A7793_CLK_SCIFA3 6
+#define R8A7793_CLK_SCIFA4 7
+#define R8A7793_CLK_SCIFA5 8
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */
diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h
index bc1ed1dbd..d3a9824ef 100644
--- a/include/dt-bindings/clock/rk3066a-cru.h
+++ b/include/dt-bindings/clock/rk3066a-cru.h
@@ -13,6 +13,9 @@
* GNU General Public License for more details.
*/
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
+
#include <dt-bindings/clock/rk3188-cru-common.h>
/* soft-reset indices */
@@ -33,3 +36,5 @@
#define SRST_HDMI 96
#define SRST_HDMI_APB 97
#define SRST_CIF1 111
+
+#endif
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 6a370503c..8df77a7c0 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -13,6 +13,9 @@
* GNU General Public License for more details.
*/
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
+
/* core clocks from */
#define PLL_APLL 1
#define PLL_DPLL 2
@@ -248,3 +251,5 @@
#define SRST_PTM1_ATB 141
#define SRST_CTM 142
#define SRST_TS 143
+
+#endif
diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h
index 9fac8edd3..9f2e631f2 100644
--- a/include/dt-bindings/clock/rk3188-cru.h
+++ b/include/dt-bindings/clock/rk3188-cru.h
@@ -13,6 +13,9 @@
* GNU General Public License for more details.
*/
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
+
#include <dt-bindings/clock/rk3188-cru-common.h>
/* soft-reset indices */
@@ -49,3 +52,5 @@
#define SRST_GPU_BRIDGE 121
#define SRST_CTI3 123
#define SRST_CTI3_APB 124
+
+#endif
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index dea419708..c719aacef 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -13,6 +13,9 @@
* GNU General Public License for more details.
*/
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
+
/* core clocks */
#define PLL_APLL 1
#define PLL_DPLL 2
@@ -376,3 +379,5 @@
#define SRST_TSP_CLKIN0 189
#define SRST_TSP_CLKIN1 190
#define SRST_TSP_27M 191
+
+#endif
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
new file mode 100644
index 000000000..9c5dd9ba2
--- /dev/null
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
+
+/* core clocks */
+#define PLL_APLLB 1
+#define PLL_APLLL 2
+#define PLL_DPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_NPLL 6
+#define ARMCLKB 7
+#define ARMCLKL 8
+
+/* sclk gates (special clocks) */
+#define SCLK_GPU_CORE 64
+#define SCLK_SPI0 65
+#define SCLK_SPI1 66
+#define SCLK_SPI2 67
+#define SCLK_SDMMC 68
+#define SCLK_SDIO0 69
+#define SCLK_EMMC 71
+#define SCLK_TSADC 72
+#define SCLK_SARADC 73
+#define SCLK_NANDC0 75
+#define SCLK_UART0 77
+#define SCLK_UART1 78
+#define SCLK_UART2 79
+#define SCLK_UART3 80
+#define SCLK_UART4 81
+#define SCLK_I2S_8CH 82
+#define SCLK_SPDIF_8CH 83
+#define SCLK_I2S_2CH 84
+#define SCLK_TIMER0 85
+#define SCLK_TIMER1 86
+#define SCLK_TIMER2 87
+#define SCLK_TIMER3 88
+#define SCLK_TIMER4 89
+#define SCLK_TIMER5 90
+#define SCLK_TIMER6 91
+#define SCLK_OTGPHY0 93
+#define SCLK_OTG_ADP 96
+#define SCLK_HSICPHY480M 97
+#define SCLK_HSICPHY12M 98
+#define SCLK_MACREF 99
+#define SCLK_VOP0_PWM 100
+#define SCLK_MAC_RX 102
+#define SCLK_MAC_TX 103
+#define SCLK_EDP_24M 104
+#define SCLK_EDP 105
+#define SCLK_RGA 106
+#define SCLK_ISP 107
+#define SCLK_HDCP 108
+#define SCLK_HDMI_HDCP 109
+#define SCLK_HDMI_CEC 110
+#define SCLK_HEVC_CABAC 111
+#define SCLK_HEVC_CORE 112
+#define SCLK_I2S_8CH_OUT 113
+#define SCLK_SDMMC_DRV 114
+#define SCLK_SDIO0_DRV 115
+#define SCLK_EMMC_DRV 117
+#define SCLK_SDMMC_SAMPLE 118
+#define SCLK_SDIO0_SAMPLE 119
+#define SCLK_EMMC_SAMPLE 121
+#define SCLK_USBPHY480M 122
+#define SCLK_PVTM_CORE 123
+#define SCLK_PVTM_GPU 124
+#define SCLK_PVTM_PMU 125
+#define SCLK_SFC 126
+#define SCLK_MAC 127
+#define SCLK_MACREF_OUT 128
+
+#define DCLK_VOP 190
+#define MCLK_CRYPTO 191
+
+/* aclk gates */
+#define ACLK_GPU_MEM 192
+#define ACLK_GPU_CFG 193
+#define ACLK_DMAC_BUS 194
+#define ACLK_DMAC_PERI 195
+#define ACLK_PERI_MMU 196
+#define ACLK_GMAC 197
+#define ACLK_VOP 198
+#define ACLK_VOP_IEP 199
+#define ACLK_RGA 200
+#define ACLK_HDCP 201
+#define ACLK_IEP 202
+#define ACLK_VIO0_NOC 203
+#define ACLK_VIP 204
+#define ACLK_ISP 205
+#define ACLK_VIO1_NOC 206
+#define ACLK_VIDEO 208
+#define ACLK_BUS 209
+#define ACLK_PERI 210
+
+/* pclk gates */
+#define PCLK_GPIO0 320
+#define PCLK_GPIO1 321
+#define PCLK_GPIO2 322
+#define PCLK_GPIO3 323
+#define PCLK_PMUGRF 324
+#define PCLK_MAILBOX 325
+#define PCLK_GRF 329
+#define PCLK_SGRF 330
+#define PCLK_PMU 331
+#define PCLK_I2C0 332
+#define PCLK_I2C1 333
+#define PCLK_I2C2 334
+#define PCLK_I2C3 335
+#define PCLK_I2C4 336
+#define PCLK_I2C5 337
+#define PCLK_SPI0 338
+#define PCLK_SPI1 339
+#define PCLK_SPI2 340
+#define PCLK_UART0 341
+#define PCLK_UART1 342
+#define PCLK_UART2 343
+#define PCLK_UART3 344
+#define PCLK_UART4 345
+#define PCLK_TSADC 346
+#define PCLK_SARADC 347
+#define PCLK_SIM 348
+#define PCLK_GMAC 349
+#define PCLK_PWM0 350
+#define PCLK_PWM1 351
+#define PCLK_TIMER0 353
+#define PCLK_TIMER1 354
+#define PCLK_EDP_CTRL 355
+#define PCLK_MIPI_DSI0 356
+#define PCLK_MIPI_CSI 358
+#define PCLK_HDCP 359
+#define PCLK_HDMI_CTRL 360
+#define PCLK_VIO_H2P 361
+#define PCLK_BUS 362
+#define PCLK_PERI 363
+#define PCLK_DDRUPCTL 364
+#define PCLK_DDRPHY 365
+#define PCLK_ISP 366
+#define PCLK_VIP 367
+#define PCLK_WDT 368
+
+/* hclk gates */
+#define HCLK_SFC 448
+#define HCLK_OTG0 449
+#define HCLK_HOST0 450
+#define HCLK_HOST1 451
+#define HCLK_HSIC 452
+#define HCLK_NANDC0 453
+#define HCLK_TSP 455
+#define HCLK_SDMMC 456
+#define HCLK_SDIO0 457
+#define HCLK_EMMC 459
+#define HCLK_HSADC 460
+#define HCLK_CRYPTO 461
+#define HCLK_I2S_2CH 462
+#define HCLK_I2S_8CH 463
+#define HCLK_SPDIF 464
+#define HCLK_VOP 465
+#define HCLK_ROM 467
+#define HCLK_IEP 468
+#define HCLK_ISP 469
+#define HCLK_RGA 470
+#define HCLK_VIO_AHB_ARBI 471
+#define HCLK_VIO_NOC 472
+#define HCLK_VIP 473
+#define HCLK_VIO_H2P 474
+#define HCLK_VIO_HDCPMMU 475
+#define HCLK_VIDEO 476
+#define HCLK_BUS 477
+#define HCLK_PERI 478
+
+#define CLK_NR_CLKS (HCLK_PERI + 1)
+
+/* soft-reset indices */
+#define SRST_CORE_B0 0
+#define SRST_CORE_B1 1
+#define SRST_CORE_B2 2
+#define SRST_CORE_B3 3
+#define SRST_CORE_B0_PO 4
+#define SRST_CORE_B1_PO 5
+#define SRST_CORE_B2_PO 6
+#define SRST_CORE_B3_PO 7
+#define SRST_L2_B 8
+#define SRST_ADB_B 9
+#define SRST_PD_CORE_B_NIU 10
+#define SRST_PDBUS_STRSYS 11
+#define SRST_SOCDBG_B 14
+#define SRST_CORE_B_DBG 15
+
+#define SRST_DMAC1 18
+#define SRST_INTMEM 19
+#define SRST_ROM 20
+#define SRST_SPDIF8CH 21
+#define SRST_I2S8CH 23
+#define SRST_MAILBOX 24
+#define SRST_I2S2CH 25
+#define SRST_EFUSE_256 26
+#define SRST_MCU_SYS 28
+#define SRST_MCU_PO 29
+#define SRST_MCU_NOC 30
+#define SRST_EFUSE 31
+
+#define SRST_GPIO0 32
+#define SRST_GPIO1 33
+#define SRST_GPIO2 34
+#define SRST_GPIO3 35
+#define SRST_GPIO4 36
+#define SRST_PMUGRF 41
+#define SRST_I2C0 42
+#define SRST_I2C1 43
+#define SRST_I2C2 44
+#define SRST_I2C3 45
+#define SRST_I2C4 46
+#define SRST_I2C5 47
+
+#define SRST_DWPWM 48
+#define SRST_MMC_PERI 49
+#define SRST_PERIPH_MMU 50
+#define SRST_GRF 55
+#define SRST_PMU 56
+#define SRST_PERIPH_AXI 57
+#define SRST_PERIPH_AHB 58
+#define SRST_PERIPH_APB 59
+#define SRST_PERIPH_NIU 60
+#define SRST_PDPERI_AHB_ARBI 61
+#define SRST_EMEM 62
+#define SRST_USB_PERI 63
+
+#define SRST_DMAC2 64
+#define SRST_MAC 66
+#define SRST_GPS 67
+#define SRST_RKPWM 69
+#define SRST_USBHOST0 72
+#define SRST_HSIC 73
+#define SRST_HSIC_AUX 74
+#define SRST_HSIC_PHY 75
+#define SRST_HSADC 76
+#define SRST_NANDC0 77
+#define SRST_SFC 79
+
+#define SRST_SPI0 83
+#define SRST_SPI1 84
+#define SRST_SPI2 85
+#define SRST_SARADC 87
+#define SRST_PDALIVE_NIU 88
+#define SRST_PDPMU_INTMEM 89
+#define SRST_PDPMU_NIU 90
+#define SRST_SGRF 91
+
+#define SRST_VIO_ARBI 96
+#define SRST_RGA_NIU 97
+#define SRST_VIO0_NIU_AXI 98
+#define SRST_VIO_NIU_AHB 99
+#define SRST_LCDC0_AXI 100
+#define SRST_LCDC0_AHB 101
+#define SRST_LCDC0_DCLK 102
+#define SRST_VIP 104
+#define SRST_RGA_CORE 105
+#define SRST_IEP_AXI 106
+#define SRST_IEP_AHB 107
+#define SRST_RGA_AXI 108
+#define SRST_RGA_AHB 109
+#define SRST_ISP 110
+#define SRST_EDP_24M 111
+
+#define SRST_VIDEO_AXI 112
+#define SRST_VIDEO_AHB 113
+#define SRST_MIPIDPHYTX 114
+#define SRST_MIPIDSI0 115
+#define SRST_MIPIDPHYRX 116
+#define SRST_MIPICSI 117
+#define SRST_GPU 120
+#define SRST_HDMI 121
+#define SRST_EDP 122
+#define SRST_PMU_PVTM 123
+#define SRST_CORE_PVTM 124
+#define SRST_GPU_PVTM 125
+#define SRST_GPU_SYS 126
+#define SRST_GPU_MEM_NIU 127
+
+#define SRST_MMC0 128
+#define SRST_SDIO0 129
+#define SRST_EMMC 131
+#define SRST_USBOTG_AHB 132
+#define SRST_USBOTG_PHY 133
+#define SRST_USBOTG_CON 134
+#define SRST_USBHOST0_AHB 135
+#define SRST_USBHOST0_PHY 136
+#define SRST_USBHOST0_CON 137
+#define SRST_USBOTG_UTMI 138
+#define SRST_USBHOST1_UTMI 139
+#define SRST_USB_ADP 141
+
+#define SRST_CORESIGHT 144
+#define SRST_PD_CORE_AHB_NOC 145
+#define SRST_PD_CORE_APB_NOC 146
+#define SRST_GIC 148
+#define SRST_LCDC_PWM0 149
+#define SRST_RGA_H2P_BRG 153
+#define SRST_VIDEO 154
+#define SRST_GPU_CFG_NIU 157
+#define SRST_TSADC 159
+
+#define SRST_DDRPHY0 160
+#define SRST_DDRPHY0_APB 161
+#define SRST_DDRCTRL0 162
+#define SRST_DDRCTRL0_APB 163
+#define SRST_VIDEO_NIU 165
+#define SRST_VIDEO_NIU_AHB 167
+#define SRST_DDRMSCH0 170
+#define SRST_PDBUS_AHB 173
+#define SRST_CRYPTO 174
+
+#define SRST_UART0 179
+#define SRST_UART1 180
+#define SRST_UART2 181
+#define SRST_UART3 182
+#define SRST_UART4 183
+#define SRST_SIMC 186
+#define SRST_TSP 188
+#define SRST_TSP_CLKIN0 189
+
+#define SRST_CORE_L0 192
+#define SRST_CORE_L1 193
+#define SRST_CORE_L2 194
+#define SRST_CORE_L3 195
+#define SRST_CORE_L0_PO 195
+#define SRST_CORE_L1_PO 197
+#define SRST_CORE_L2_PO 198
+#define SRST_CORE_L3_PO 199
+#define SRST_L2_L 200
+#define SRST_ADB_L 201
+#define SRST_PD_CORE_L_NIU 202
+#define SRST_CCI_SYS 203
+#define SRST_CCI_DDR 204
+#define SRST_CCI 205
+#define SRST_SOCDBG_L 206
+#define SRST_CORE_L_DBG 207
+
+#define SRST_CORE_B0_NC 208
+#define SRST_CORE_B0_PO_NC 209
+#define SRST_L2_B_NC 210
+#define SRST_ADB_B_NC 211
+#define SRST_PD_CORE_B_NIU_NC 212
+#define SRST_PDBUS_STRSYS_NC 213
+#define SRST_CORE_L0_NC 214
+#define SRST_CORE_L0_PO_NC 215
+#define SRST_L2_L_NC 216
+#define SRST_ADB_L_NC 217
+#define SRST_PD_CORE_L_NIU_NC 218
+#define SRST_CCI_SYS_NC 219
+#define SRST_CCI_DDR_NC 220
+#define SRST_CCI_NC 221
+#define SRST_TRACE_NC 222
+
+#define SRST_TIMER00 224
+#define SRST_TIMER01 225
+#define SRST_TIMER02 226
+#define SRST_TIMER03 227
+#define SRST_TIMER04 228
+#define SRST_TIMER05 229
+#define SRST_TIMER10 230
+#define SRST_TIMER11 231
+#define SRST_TIMER12 232
+#define SRST_TIMER13 233
+#define SRST_TIMER14 234
+#define SRST_TIMER15 235
+#define SRST_TIMER0_APB 236
+#define SRST_TIMER1_APB 237
+
+#endif
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
index e683dbb7e..26ee564b0 100644
--- a/include/dt-bindings/clock/zx296702-clock.h
+++ b/include/dt-bindings/clock/zx296702-clock.h
@@ -153,7 +153,16 @@
#define ZX296702_I2S0_WCLK 9
#define ZX296702_I2S0_PCLK 10
#define ZX296702_I2S0_DIV 11
-#define ZX296702_LSP0CLK_END 12
+#define ZX296702_I2S1_WCLK_MUX 12
+#define ZX296702_I2S1_WCLK 13
+#define ZX296702_I2S1_PCLK 14
+#define ZX296702_I2S1_DIV 15
+#define ZX296702_I2S2_WCLK_MUX 16
+#define ZX296702_I2S2_WCLK 17
+#define ZX296702_I2S2_PCLK 18
+#define ZX296702_I2S2_DIV 19
+#define ZX296702_GPIO_CLK 20
+#define ZX296702_LSP0CLK_END 21
#define ZX296702_UART0_WCLK_MUX 0
#define ZX296702_UART0_WCLK 1
@@ -165,6 +174,10 @@
#define ZX296702_SDMMC0_WCLK_DIV 7
#define ZX296702_SDMMC0_WCLK 8
#define ZX296702_SDMMC0_PCLK 9
-#define ZX296702_LSP1CLK_END 10
+#define ZX296702_SPDIF1_WCLK_MUX 10
+#define ZX296702_SPDIF1_WCLK 11
+#define ZX296702_SPDIF1_PCLK 12
+#define ZX296702_SPDIF1_DIV 13
+#define ZX296702_LSP1CLK_END 14
#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 000000000..ad9e6ecb9
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
+#define __DT_BINDINGS_DMA_AXI_DMAC_H__
+
+#define AXI_DMAC_BUS_TYPE_AXI_MM 0
+#define AXI_DMAC_BUS_TYPE_AXI_STREAM 1
+#define AXI_DMAC_BUS_TYPE_FIFO 2
+
+#endif
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fdfb..000000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
-
-/*
- * Request type numbers for the JZ4780 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4780_DMA_I2S1_TX 0x4
-#define JZ4780_DMA_I2S1_RX 0x5
-#define JZ4780_DMA_I2S0_TX 0x6
-#define JZ4780_DMA_I2S0_RX 0x7
-#define JZ4780_DMA_AUTO 0x8
-#define JZ4780_DMA_SADC_RX 0x9
-#define JZ4780_DMA_UART4_TX 0xc
-#define JZ4780_DMA_UART4_RX 0xd
-#define JZ4780_DMA_UART3_TX 0xe
-#define JZ4780_DMA_UART3_RX 0xf
-#define JZ4780_DMA_UART2_TX 0x10
-#define JZ4780_DMA_UART2_RX 0x11
-#define JZ4780_DMA_UART1_TX 0x12
-#define JZ4780_DMA_UART1_RX 0x13
-#define JZ4780_DMA_UART0_TX 0x14
-#define JZ4780_DMA_UART0_RX 0x15
-#define JZ4780_DMA_SSI0_TX 0x16
-#define JZ4780_DMA_SSI0_RX 0x17
-#define JZ4780_DMA_SSI1_TX 0x18
-#define JZ4780_DMA_SSI1_RX 0x19
-#define JZ4780_DMA_MSC0_TX 0x1a
-#define JZ4780_DMA_MSC0_RX 0x1b
-#define JZ4780_DMA_MSC1_TX 0x1c
-#define JZ4780_DMA_MSC1_RX 0x1d
-#define JZ4780_DMA_MSC2_TX 0x1e
-#define JZ4780_DMA_MSC2_RX 0x1f
-#define JZ4780_DMA_PCM0_TX 0x20
-#define JZ4780_DMA_PCM0_RX 0x21
-#define JZ4780_DMA_SMB0_TX 0x24
-#define JZ4780_DMA_SMB0_RX 0x25
-#define JZ4780_DMA_SMB1_TX 0x26
-#define JZ4780_DMA_SMB1_RX 0x27
-#define JZ4780_DMA_SMB2_TX 0x28
-#define JZ4780_DMA_SMB2_RX 0x29
-#define JZ4780_DMA_SMB3_TX 0x2a
-#define JZ4780_DMA_SMB3_RX 0x2b
-#define JZ4780_DMA_SMB4_TX 0x2c
-#define JZ4780_DMA_SMB4_RX 0x2d
-#define JZ4780_DMA_DES_TX 0x2e
-#define JZ4780_DMA_DES_RX 0x2f
-
-#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h
new file mode 100644
index 000000000..1d5da81d9
--- /dev/null
+++ b/include/dt-bindings/i2c/i2c.h
@@ -0,0 +1,18 @@
+/*
+ * This header provides constants for I2C bindings
+ *
+ * Copyright (C) 2015 by Sang Engineering
+ * Copyright (C) 2015 by Renesas Electronics Corporation
+ *
+ * Wolfram Sang <wsa@sang-engineering.com>
+ *
+ * GPLv2 only
+ */
+
+#ifndef _DT_BINDINGS_I2C_I2C_H
+#define _DT_BINDINGS_I2C_I2C_H
+
+#define I2C_TEN_BIT_ADDRESS (1 << 31)
+#define I2C_OWN_SLAVE_ADDRESS (1 << 30)
+
+#endif
diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h
new file mode 100644
index 000000000..491c5f974
--- /dev/null
+++ b/include/dt-bindings/leds/leds-ns2.h
@@ -0,0 +1,8 @@
+#ifndef _DT_BINDINGS_LEDS_NS2_H
+#define _DT_BINDINGS_LEDS_NS2_H
+
+#define NS_V2_LED_OFF 0
+#define NS_V2_LED_ON 1
+#define NS_V2_LED_SATA 2
+
+#endif
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
new file mode 100644
index 000000000..a0b5c7be6
--- /dev/null
+++ b/include/dt-bindings/media/c8sectpfe.h
@@ -0,0 +1,12 @@
+#ifndef __DT_C8SECTPFE_H
+#define __DT_C8SECTPFE_H
+
+#define STV0367_TDA18212_NIMA_1 0
+#define STV0367_TDA18212_NIMA_2 1
+#define STV0367_TDA18212_NIMB_1 2
+#define STV0367_TDA18212_NIMB_2 3
+
+#define STV0903_6110_LNB24_NIMA 4
+#define STV0903_6110_LNB24_NIMB 5
+
+#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
new file mode 100644
index 000000000..d1731bc14
--- /dev/null
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -0,0 +1,36 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA210_MC_H
+
+#define TEGRA_SWGROUP_PTC 0
+#define TEGRA_SWGROUP_DC 1
+#define TEGRA_SWGROUP_DCB 2
+#define TEGRA_SWGROUP_AFI 3
+#define TEGRA_SWGROUP_AVPC 4
+#define TEGRA_SWGROUP_HDA 5
+#define TEGRA_SWGROUP_HC 6
+#define TEGRA_SWGROUP_NVENC 7
+#define TEGRA_SWGROUP_PPCS 8
+#define TEGRA_SWGROUP_SATA 9
+#define TEGRA_SWGROUP_MPCORE 10
+#define TEGRA_SWGROUP_ISP2 11
+#define TEGRA_SWGROUP_XUSB_HOST 12
+#define TEGRA_SWGROUP_XUSB_DEV 13
+#define TEGRA_SWGROUP_ISP2B 14
+#define TEGRA_SWGROUP_TSEC 15
+#define TEGRA_SWGROUP_A9AVP 16
+#define TEGRA_SWGROUP_GPU 17
+#define TEGRA_SWGROUP_SDMMC1A 18
+#define TEGRA_SWGROUP_SDMMC2A 19
+#define TEGRA_SWGROUP_SDMMC3A 20
+#define TEGRA_SWGROUP_SDMMC4A 21
+#define TEGRA_SWGROUP_VIC 22
+#define TEGRA_SWGROUP_VI 23
+#define TEGRA_SWGROUP_NVDEC 24
+#define TEGRA_SWGROUP_APE 25
+#define TEGRA_SWGROUP_NVJPG 26
+#define TEGRA_SWGROUP_SE 27
+#define TEGRA_SWGROUP_AXIAP 28
+#define TEGRA_SWGROUP_ETR 29
+#define TEGRA_SWGROUP_TSECB 30
+
+#endif
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75d8..d05894afa 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
#define ST_LPC_MODE_RTC 0
#define ST_LPC_MODE_WDT 1
+#define ST_LPC_MODE_CLKSRC 2
#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index b00bbc9c6..774dc1e84 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -14,6 +14,7 @@
#define MUX_MODE6 6
#define MUX_MODE7 7
#define MUX_MODE8 8
+#define MUX_MODE9 9
#define PULL_DISABLE (1 << 16)
#define PULL_UP (1 << 17)
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 7448edff4..4379e29f0 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,6 +30,26 @@
#define MUX_MODE14 0xe
#define MUX_MODE15 0xf
+/* Certain pins need virtual mode, but note: they may glitch */
+#define MUX_VIRTUAL_MODE0 (MODE_SELECT | (0x0 << 4))
+#define MUX_VIRTUAL_MODE1 (MODE_SELECT | (0x1 << 4))
+#define MUX_VIRTUAL_MODE2 (MODE_SELECT | (0x2 << 4))
+#define MUX_VIRTUAL_MODE3 (MODE_SELECT | (0x3 << 4))
+#define MUX_VIRTUAL_MODE4 (MODE_SELECT | (0x4 << 4))
+#define MUX_VIRTUAL_MODE5 (MODE_SELECT | (0x5 << 4))
+#define MUX_VIRTUAL_MODE6 (MODE_SELECT | (0x6 << 4))
+#define MUX_VIRTUAL_MODE7 (MODE_SELECT | (0x7 << 4))
+#define MUX_VIRTUAL_MODE8 (MODE_SELECT | (0x8 << 4))
+#define MUX_VIRTUAL_MODE9 (MODE_SELECT | (0x9 << 4))
+#define MUX_VIRTUAL_MODE10 (MODE_SELECT | (0xa << 4))
+#define MUX_VIRTUAL_MODE11 (MODE_SELECT | (0xb << 4))
+#define MUX_VIRTUAL_MODE12 (MODE_SELECT | (0xc << 4))
+#define MUX_VIRTUAL_MODE13 (MODE_SELECT | (0xd << 4))
+#define MUX_VIRTUAL_MODE14 (MODE_SELECT | (0xe << 4))
+#define MUX_VIRTUAL_MODE15 (MODE_SELECT | (0xf << 4))
+
+#define MODE_SELECT (1 << 8)
+
#define PULL_ENA (0 << 16)
#define PULL_DIS (1 << 16)
#define PULL_UP (1 << 17)
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c10205491..a15c1704d 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
/* power-source */
+
+/* Digital Input/Output: level [PM8058] */
+#define PM8058_MPP_VPH 0
+#define PM8058_MPP_S3 1
+#define PM8058_MPP_L2 2
+#define PM8058_MPP_L3 3
+
+/* Digital Input/Output: level [PM8901] */
+#define PM8901_MPP_MSMIO 0
+#define PM8901_MPP_DIG 1
+#define PM8901_MPP_L5 2
+#define PM8901_MPP_S4 3
+#define PM8901_MPP_VPH 4
+
+/* Digital Input/Output: level [PM8921] */
+#define PM8921_MPP_S4 1
+#define PM8921_MPP_L15 3
+#define PM8921_MPP_L17 4
+#define PM8921_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8821] */
+#define PM8821_MPP_1P8 0
+#define PM8821_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8018] */
+#define PM8018_MPP_L4 0
+#define PM8018_MPP_L14 1
+#define PM8018_MPP_S3 2
+#define PM8018_MPP_L6 3
+#define PM8018_MPP_L2 4
+#define PM8018_MPP_L5 5
+#define PM8018_MPP_VPH 7
+
+/* Digital Input/Output: level [PM8038] */
+#define PM8038_MPP_L20 0
+#define PM8038_MPP_L11 1
+#define PM8038_MPP_L5 2
+#define PM8038_MPP_L15 3
+#define PM8038_MPP_L17 4
+#define PM8038_MPP_VPH 7
+
#define PM8841_MPP_VPH 0
#define PM8841_MPP_S3 2
@@ -37,6 +78,16 @@
#define PMIC_MPP_AMUX_ROUTE_ABUS3 6
#define PMIC_MPP_AMUX_ROUTE_ABUS4 7
+/* Analog Output: level */
+#define PMIC_MPP_AOUT_LVL_1V25 0
+#define PMIC_MPP_AOUT_LVL_1V25_2 1
+#define PMIC_MPP_AOUT_LVL_0V625 2
+#define PMIC_MPP_AOUT_LVL_0V3125 3
+#define PMIC_MPP_AOUT_LVL_MPP 4
+#define PMIC_MPP_AOUT_LVL_ABUS1 5
+#define PMIC_MPP_AOUT_LVL_ABUS2 6
+#define PMIC_MPP_AOUT_LVL_ABUS3 7
+
/* To be used with "function" */
#define PMIC_MPP_FUNC_NORMAL "normal"
#define PMIC_MPP_FUNC_PAIRED "paired"
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
new file mode 100644
index 000000000..b34cee95a
--- /dev/null
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -0,0 +1,15 @@
+#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H
+#define _DT_BINDINGS_POWER_MT8173_POWER_H
+
+#define MT8173_POWER_DOMAIN_VDEC 0
+#define MT8173_POWER_DOMAIN_VENC 1
+#define MT8173_POWER_DOMAIN_ISP 2
+#define MT8173_POWER_DOMAIN_MM 3
+#define MT8173_POWER_DOMAIN_VENC_LT 4
+#define MT8173_POWER_DOMAIN_AUDIO 5
+#define MT8173_POWER_DOMAIN_USB 6
+#define MT8173_POWER_DOMAIN_MFG_ASYNC 7
+#define MT8173_POWER_DOMAIN_MFG_2D 8
+#define MT8173_POWER_DOMAIN_MFG 9
+
+#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h
new file mode 100644
index 000000000..acb0bbf4f
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014, Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
+#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
+
+/* MPUMODRST */
+#define CPU0_RESET 0
+#define CPU1_RESET 1
+#define WDS_RESET 2
+#define SCUPER_RESET 3
+
+/* PER0MODRST */
+#define EMAC0_RESET 32
+#define EMAC1_RESET 33
+#define EMAC2_RESET 34
+#define USB0_RESET 35
+#define USB1_RESET 36
+#define NAND_RESET 37
+#define QSPI_RESET 38
+#define SDMMC_RESET 39
+#define EMAC0_OCP_RESET 40
+#define EMAC1_OCP_RESET 41
+#define EMAC2_OCP_RESET 42
+#define USB0_OCP_RESET 43
+#define USB1_OCP_RESET 44
+#define NAND_OCP_RESET 45
+#define QSPI_OCP_RESET 46
+#define SDMMC_OCP_RESET 47
+#define DMA_RESET 48
+#define SPIM0_RESET 49
+#define SPIM1_RESET 50
+#define SPIS0_RESET 51
+#define SPIS1_RESET 52
+#define DMA_OCP_RESET 53
+#define EMAC_PTP_RESET 54
+/* 55 is empty */
+#define DMAIF0_RESET 56
+#define DMAIF1_RESET 57
+#define DMAIF2_RESET 58
+#define DMAIF3_RESET 59
+#define DMAIF4_RESET 60
+#define DMAIF5_RESET 61
+#define DMAIF6_RESET 62
+#define DMAIF7_RESET 63
+
+/* PER1MODRST */
+#define L4WD0_RESET 64
+#define L4WD1_RESET 65
+#define L4SYSTIMER0_RESET 66
+#define L4SYSTIMER1_RESET 67
+#define SPTIMER0_RESET 68
+#define SPTIMER1_RESET 69
+/* 70-71 are reserved */
+#define I2C0_RESET 72
+#define I2C1_RESET 73
+#define I2C2_RESET 74
+#define I2C3_RESET 75
+#define I2C4_RESET 76
+/* 77-79 are reserved */
+#define UART0_RESET 80
+#define UART1_RESET 81
+/* 82-87 are reserved */
+#define GPIO0_RESET 88
+#define GPIO1_RESET 89
+#define GPIO2_RESET 90
+
+/* BRGMODRST */
+#define HPS2FPGA_RESET 96
+#define LWHPS2FPGA_RESET 97
+#define FPGA2HPS_RESET 98
+#define F2SSDRAM0_RESET 99
+#define F2SSDRAM1_RESET 100
+#define F2SSDRAM2_RESET 101
+#define DDRSCH_RESET 102
+
+/* SYSMODRST */
+#define ROM_RESET 128
+#define OCRAM_RESET 129
+/* 130 is reserved */
+#define FPGAMGR_RESET 131
+#define S2F_RESET 132
+#define SYSDBG_RESET 133
+#define OCRAM_OCP_RESET 134
+
+/* COLDMODRST */
+#define CLKMGRCOLD_RESET 160
+/* 161-162 are reserved */
+#define S2FCOLD_RESET 163
+#define TIMESTAMPCOLD_RESET 164
+#define TAPCOLD_RESET 165
+#define HMCCOLD_RESET 166
+#define IOMGRCOLD_RESET 167
+
+/* NRSTMODRST */
+#define NRSTPINOE_RESET 192
+
+/* DBGMODRST */
+#define DBG_RESET 224
+#endif
diff --git a/include/dt-bindings/reset-controller/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h
index 02d4328fe..02d4328fe 100644
--- a/include/dt-bindings/reset-controller/stih407-resets.h
+++ b/include/dt-bindings/reset/stih407-resets.h
diff --git a/include/dt-bindings/reset-controller/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
index c2329fe29..c2329fe29 100644
--- a/include/dt-bindings/reset-controller/stih415-resets.h
+++ b/include/dt-bindings/reset/stih415-resets.h
diff --git a/include/dt-bindings/reset-controller/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
index fcf9af1ac..fcf9af1ac 100644
--- a/include/dt-bindings/reset-controller/stih416-resets.h
+++ b/include/dt-bindings/reset/stih416-resets.h
diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h
new file mode 100644
index 000000000..070e4f6e7
--- /dev/null
+++ b/include/dt-bindings/reset/tegra124-car.h
@@ -0,0 +1,12 @@
+/*
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
+ */
+
+#ifndef _DT_BINDINGS_RESET_TEGRA124_CAR_H
+#define _DT_BINDINGS_RESET_TEGRA124_CAR_H
+
+#define TEGRA124_RESET(x) (6 * 32 + (x))
+#define TEGRA124_RST_DFLL_DVCO TEGRA124_RESET(0)
+
+#endif /* _DT_BINDINGS_RESET_TEGRA124_CAR_H */
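
TEGRA124_RESET() simply packs a (reset register, bit) pair as register * 32 + bit, so the single DFLL reset exported here resolves to index 192. A throwaway compile-time check, shown only to make the arithmetic explicit:

    /* Illustrative only: the reset index is reset register * 32 + bit. */
    #include <dt-bindings/reset/tegra124-car.h>

    _Static_assert(TEGRA124_RST_DFLL_DVCO == 192,
                   "TEGRA124_RESET(0) expands to 6 * 32 + 0 = 192");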
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 72665eb80..b20cd885c 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -15,6 +15,7 @@
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
#include <linux/key.h>
+#include <crypto/public_key.h>
extern struct key *system_trusted_keyring;
static inline struct key *get_system_trusted_keyring(void)
@@ -28,4 +29,10 @@ static inline struct key *get_system_trusted_keyring(void)
}
#endif
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+extern int system_verify_data(const void *data, unsigned long len,
+ const void *raw_pkcs7, size_t pkcs7_len,
+ enum key_being_used_for usage);
+#endif
+
#endif /* _KEYS_SYSTEM_KEYRING_H */
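
system_verify_data() checks a data blob against the system trusted keyring using a detached PKCS#7 message. A hedged sketch of a caller follows; it assumes CONFIG_SYSTEM_DATA_VERIFICATION is enabled and that VERIFYING_UNSPECIFIED_SIGNATURE is one of the enum key_being_used_for values provided by <crypto/public_key.h> in this series.

    /* Sketch only: verify a blob with a detached PKCS#7 signature against the
     * system trusted keyring. The usage enumerator is an assumption. */
    #include <linux/kernel.h>
    #include <keys/system_keyring.h>

    static int example_check_signed_blob(const void *data, unsigned long len,
                                         const void *pkcs7, size_t pkcs7_len)
    {
        int ret = system_verify_data(data, len, pkcs7, pkcs7_len,
                                     VERIFYING_UNSPECIFIED_SIGNATURE);

        if (ret < 0)
            pr_warn("PKCS#7 verification failed: %d\n", ret);
        return ret;
    }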
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index e5966758c..e1e4d7c38 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -52,13 +52,16 @@ struct arch_timer_cpu {
/* Timer IRQ */
const struct kvm_irq_level *irq;
+
+ /* VGIC mapping */
+ struct irq_phys_map *map;
};
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq);
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 133ea00aa..4e14dac28 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
#define VGIC_V3_MAX_LRS 16
#define VGIC_MAX_IRQS 1024
#define VGIC_V2_MAX_CPUS 8
-
-/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 255)
-#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
-#endif
+#define VGIC_V3_MAX_CPUS 255
#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
@@ -95,11 +91,15 @@ enum vgic_type {
#define LR_STATE_ACTIVE (1 << 1)
#define LR_STATE_MASK (3 << 0)
#define LR_EOI_INT (1 << 2)
+#define LR_HW (1 << 3)
struct vgic_lr {
- u16 irq;
- u8 source;
- u8 state;
+ unsigned irq:10;
+ union {
+ unsigned hwirq:10;
+ unsigned source:3;
+ };
+ unsigned state:4;
};
struct vgic_vmcr {
@@ -155,6 +155,19 @@ struct vgic_io_device {
struct kvm_io_device dev;
};
+struct irq_phys_map {
+ u32 virt_irq;
+ u32 phys_irq;
+ u32 irq;
+ bool active;
+};
+
+struct irq_phys_map_entry {
+ struct list_head entry;
+ struct rcu_head rcu;
+ struct irq_phys_map map;
+};
+
struct vgic_dist {
spinlock_t lock;
bool in_kernel;
@@ -252,6 +265,10 @@ struct vgic_dist {
struct vgic_vm_ops vm_ops;
struct vgic_io_device dist_iodev;
struct vgic_io_device *redist_iodevs;
+
+ /* Virtual irq to hwirq mapping */
+ spinlock_t irq_phys_map_lock;
+ struct list_head irq_phys_map_list;
};
struct vgic_v2_cpu_if {
@@ -303,6 +320,9 @@ struct vgic_cpu {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
+
+ /* Protected by the distributor's irq_phys_map_lock */
+ struct list_head irq_phys_map_list;
};
#define LR_EMPTY 0xff
@@ -317,16 +337,25 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
+void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+ struct irq_phys_map *map, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+ int virt_irq, int irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
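
The irq_phys_map plumbing above lets a virtual interrupt be tied to a physical one so its active state can be tracked alongside the hardware, which is what the arch timer change earlier in this diff consumes. A hedged sketch of the call pattern, using only the prototypes declared above (variable names, error handling and the failure convention are illustrative):

    /* Sketch only: map a guest PPI to its host IRQ, then inject through the
     * mapped path so the physical active state is managed by the VGIC. */
    #include <linux/err.h>
    #include <linux/kvm_host.h>
    #include <kvm/arm_vgic.h>

    static struct irq_phys_map *example_map;

    static int example_map_ppi(struct kvm_vcpu *vcpu, int virt_irq, int host_irq)
    {
        example_map = kvm_vgic_map_phys_irq(vcpu, virt_irq, host_irq);
        if (IS_ERR_OR_NULL(example_map))    /* failure convention assumed */
            return -ENODEV;
        return 0;
    }

    static int example_raise(struct kvm_vcpu *vcpu)
    {
        return kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                          example_map, true);
    }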
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0b2394f61..43856d19c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -221,6 +217,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index 945d44ae5..ab3a6c002 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@ enum asn1_opcode {
ASN1_OP_MATCH_JUMP = 0x04,
ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
ASN1_OP_MATCH_ANY = 0x08,
+ ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
ASN1_OP_MATCH_ANY_ACT = 0x0a,
+ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
/* Everything before here matches unconditionally */
ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
ASN1_OP_COND_MATCH_ANY = 0x18,
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
+ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
/* Everything before here will want a tag from the data */
-#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
+#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
/* These are here to help fill up space */
- ASN1_OP_COND_FAIL = 0x1b,
- ASN1_OP_COMPLETE = 0x1c,
- ASN1_OP_ACT = 0x1d,
- ASN1_OP_RETURN = 0x1e,
+ ASN1_OP_COND_FAIL = 0x1c,
+ ASN1_OP_COMPLETE = 0x1d,
+ ASN1_OP_ACT = 0x1e,
+ ASN1_OP_MAYBE_ACT = 0x1f,
/* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
#define ASN1_OP_END__OF 0x02
#define ASN1_OP_END__ACT 0x04
+ ASN1_OP_RETURN = 0x28,
+
ASN1_OP__NR
};
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 00beddf6b..ee696d7e8 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
-#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
-#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
-#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
-#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
-#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
-#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
-#define ATMEL_US_SENDA (1 << 12) /* Send Address */
-#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
-#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
-#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
-#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
+#define ATMEL_US_CR 0x00 /* Control Register */
+#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
+#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
+#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
+#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
+#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
+#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
+#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
+#define ATMEL_US_STTBRK BIT(9) /* Start Break */
+#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
+#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
+#define ATMEL_US_SENDA BIT(12) /* Send Address */
+#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
+#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
+#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
+#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
+#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
+#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
+#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
+#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
+#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
+#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
+#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
+#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL (3 << 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR (7 << 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
-#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
-#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
-#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
-#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
+#define ATMEL_US_MR 0x04 /* Mode Register */
+#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
+#define ATMEL_US_USMODE_NORMAL 0
+#define ATMEL_US_USMODE_RS485 1
+#define ATMEL_US_USMODE_HWHS 2
+#define ATMEL_US_USMODE_MODEM 3
+#define ATMEL_US_USMODE_ISO7816_T0 4
+#define ATMEL_US_USMODE_ISO7816_T1 6
+#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
+#define ATMEL_US_USCLKS_MCK (0 << 4)
+#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
+#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
+#define ATMEL_US_CHRL_5 (0 << 6)
+#define ATMEL_US_CHRL_6 (1 << 6)
+#define ATMEL_US_CHRL_7 (2 << 6)
+#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
+#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
+#define ATMEL_US_PAR_EVEN (0 << 9)
+#define ATMEL_US_PAR_ODD (1 << 9)
+#define ATMEL_US_PAR_SPACE (2 << 9)
+#define ATMEL_US_PAR_MARK (3 << 9)
+#define ATMEL_US_PAR_NONE (4 << 9)
+#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
+#define ATMEL_US_NBSTOP_1 (0 << 12)
+#define ATMEL_US_NBSTOP_1_5 (1 << 12)
+#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
+#define ATMEL_US_CHMODE_NORMAL (0 << 14)
+#define ATMEL_US_CHMODE_ECHO (1 << 14)
+#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
+#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_MSBF BIT(16) /* Bit Order */
+#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
+#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
+#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
+#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
+#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
+#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
+#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
-#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
-#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
-#define ATMEL_US_PARE (1 << 7) /* Parity Error */
-#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
-#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
-#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
-#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
-#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
-#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
-#define ATMEL_US_RI (1 << 20) /* RI */
-#define ATMEL_US_DSR (1 << 21) /* DSR */
-#define ATMEL_US_DCD (1 << 22) /* DCD */
-#define ATMEL_US_CTS (1 << 23) /* CTS */
+#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
+#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
+#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
+#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
+#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
+#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
+#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
+#define ATMEL_US_FRAME BIT(6) /* Framing Error */
+#define ATMEL_US_PARE BIT(7) /* Parity Error */
+#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
+#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
+#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
+#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
+#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
+#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
+#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
+#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
+#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
+#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
+#define ATMEL_US_RI BIT(20) /* RI */
+#define ATMEL_US_DSR BIT(21) /* DSR */
+#define ATMEL_US_DCD BIT(22) /* DCD */
+#define ATMEL_US_CTS BIT(23) /* CTS */
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
+#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
+#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
+#define ATMEL_US_CSR 0x14 /* Channel Status Register */
+#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
+#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
+#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
+#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
+#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
-#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
+#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
+#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
+#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
+#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
+#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
+#define ATMEL_US_NER 0x44 /* Number of Errors Register */
+#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-#define ATMEL_US_NAME 0xf0 /* Ip Name */
-#define ATMEL_US_VERSION 0xfc /* Ip Version */
+#define ATMEL_US_CMPR 0x90 /* Comparison Register */
+#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
+#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
+#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
+#define ATMEL_US_ONE_DATA 0x0
+#define ATMEL_US_TWO_DATA 0x1
+#define ATMEL_US_FOUR_DATA 0x2
+#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
+#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
+#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
+#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
+
+#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
+#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
+#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
+
+#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
+#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
+#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
+#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
+#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
+#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
+#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
+#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
+#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
+#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
+#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
+#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
+#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
+#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
+
+#define ATMEL_US_NAME 0xf0 /* IP Name */
+#define ATMEL_US_VERSION 0xfc /* IP Version */
#endif
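
The new FMR/FLR definitions are plain shift-and-mask field helpers for the FIFO-capable USARTs. A minimal sketch of programming thresholds and reading back the fill levels, assuming generic readl()/writel() MMIO accessors rather than whatever wrappers the atmel_serial driver actually uses:

    /* Sketch only: enable the FIFOs, set thresholds, read back fill levels. */
    #include <linux/io.h>
    #include <linux/printk.h>
    #include <linux/atmel_serial.h>

    static void example_setup_fifos(void __iomem *base)
    {
        unsigned int fmr, flr;

        /* Enable the FIFOs and clear any stale contents via the CR bits. */
        writel(ATMEL_US_FIFOEN | ATMEL_US_TXFCLR | ATMEL_US_RXFCLR,
               base + ATMEL_US_CR);

        /* One-data ready mode, interrupt thresholds at 8 entries each. */
        fmr = ATMEL_US_TXRDYM(ATMEL_US_ONE_DATA) |
              ATMEL_US_RXRDYM(ATMEL_US_ONE_DATA) |
              ATMEL_US_TXFTHRES(8) | ATMEL_US_RXFTHRES(8);
        writel(fmr, base + ATMEL_US_FMR);

        /* The FLR accessors extract the current TX/RX FIFO levels. */
        flr = readl(base + ATMEL_US_FLR);
        pr_debug("tx fifo level %u, rx fifo level %u\n",
                 ATMEL_US_TXFL(flr), ATMEL_US_RXFL(flr));
    }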
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540..00a5763e8 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+ smp_mb__before_atomic(); \
+ op##_relaxed(args); \
+})
+
+#define __atomic_op_fence(op, args...) \
+({ \
+ typeof(op##_relaxed(args)) __ret; \
+ smp_mb__before_atomic(); \
+ __ret = op##_relaxed(args); \
+ smp_mb__after_atomic(); \
+ __ret; \
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_relaxed atomic_add_return
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define atomic_add_return_acquire(...) \
+ __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define atomic_add_return_release(...) \
+ __atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define atomic_add_return(...) \
+ __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define atomic_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define atomic_sub_return_release(...) \
+ __atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define atomic_sub_return(...) \
+ __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed atomic_xchg
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+ __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+ __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+ __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed atomic64_add_return
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+ __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+ __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+ __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+ __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed atomic64_xchg
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+ __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+ __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+ __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+ __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+ __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+ __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+ __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+ __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+ __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define xchg_relaxed xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
/**
* atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+ atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_or(mask, v);
+}
+
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
- int old;
- int new;
-
- do {
- old = atomic_read(v);
- new = old | i;
- } while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+ atomic64_and(~i, v);
+}
+#endif
+
#endif /* _LINUX_ATOMIC_H */
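
The net effect of the machinery above is that an architecture only needs to supply the _relaxed form of an operation; the acquire, release and fully ordered forms are then synthesized with smp_mb__before_atomic()/smp_mb__after_atomic(). A toy sketch of the variants in use (illustrative only; real code should use the kernel's locking primitives, not a hand-rolled test-and-set):

    /* Sketch only: acquire on the winning cmpxchg, release on the store back. */
    #include <linux/atomic.h>
    #include <asm/processor.h>    /* cpu_relax() */

    static atomic_t toy_lock = ATOMIC_INIT(0);

    static void toy_lock_acquire(void)
    {
        /* ACQUIRE applies to the load half of the compound operation; a
         * failed atomic_cmpxchg_acquire() implies no ordering at all. */
        while (atomic_cmpxchg_acquire(&toy_lock, 0, 1) != 0)
            cpu_relax();
    }

    static void toy_lock_release(void)
    {
        /* RELEASE orders the critical section before the unlocking store. */
        atomic_set_release(&toy_lock, 0);
    }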
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c2e7e3a83..b2abc996c 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -27,6 +27,9 @@
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_DEV_UNSET ((dev_t)-1)
+
struct audit_sig_info {
uid_t uid;
pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
struct audit_field *inode_f; /* quick access to an inode field */
struct audit_watch *watch; /* associated watch */
struct audit_tree *tree; /* associated watched tree */
+ struct audit_fsnotify_mark *exe;
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
struct list_head list; /* for AUDIT_LIST* purposes only */
u64 prio;
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd74..d04aa5828 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
/* Exponentially weighted moving average (EWMA) */
-/* For more documentation see lib/average.c */
-
-struct ewma {
- unsigned long internal;
- unsigned long factor;
- unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
- unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
- return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight) \
+ struct ewma_##name { \
+ unsigned long internal; \
+ }; \
+ static inline void ewma_##name##_init(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ e->internal = 0; \
+ } \
+ static inline unsigned long \
+ ewma_##name##_read(struct ewma_##name *e) \
+ { \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ return e->internal >> ilog2(_factor); \
+ } \
+ static inline void ewma_##name##_add(struct ewma_##name *e, \
+ unsigned long val) \
+ { \
+ unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long weight = ilog2(_weight); \
+ unsigned long factor = ilog2(_factor); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
+ BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
+ BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
+ \
+ ACCESS_ONCE(e->internal) = internal ? \
+ (((internal << weight) - internal) + \
+ (val << factor)) >> weight : \
+ (val << factor); \
+ }
#endif /* _LINUX_AVERAGE_H */
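
DECLARE_EWMA() replaces the old runtime-parameterised struct ewma with a per-user type whose factor and weight are compile-time powers of two. A short sketch of the generated API; the rssi name and the 16/8 parameters are arbitrary choices for illustration:

    /* Sketch only: DECLARE_EWMA(rssi, 16, 8) emits struct ewma_rssi plus the
     * ewma_rssi_init/add/read helpers used below. */
    #include <linux/kernel.h>
    #include <linux/average.h>

    DECLARE_EWMA(rssi, 16, 8)    /* factor 16, weight 8 -- both powers of two */

    struct example_sta {
        struct ewma_rssi avg_rssi;
    };

    static void example_sta_init(struct example_sta *sta)
    {
        ewma_rssi_init(&sta->avg_rssi);
    }

    static void example_rx(struct example_sta *sta, unsigned long rssi)
    {
        ewma_rssi_add(&sta->avg_rssi, rssi);
    }

    static unsigned long example_avg_rssi(struct example_sta *sta)
    {
        return ewma_rssi_read(&sta->avg_rssi);
    }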
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index a23209b43..1b4d69f68 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
+ struct list_head bdi_node; /* anchored at bdi->wb_list */
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct percpu_ref refcnt; /* used only for !root wb's */
struct fprop_local_percpu memcg_completions;
@@ -150,6 +152,7 @@ struct backing_dev_info {
atomic_long_t tot_write_bandwidth;
struct bdi_writeback wb; /* the root writeback info for this bdi */
+ struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0fe9df983..c85f74946 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,18 +13,23 @@
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
+#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_destroy(struct backing_dev_info *bdi);
+void bdi_exit(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void bdi_destroy(struct backing_dev_info *bdi);
+
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
@@ -252,13 +257,19 @@ int inode_congested(struct inode *inode, int cong_bits);
* @inode: inode of interest
*
* cgroup writeback requires support from both the bdi and filesystem.
- * Test whether @inode has both.
+ * Also, both memcg and iocg have to be on the default hierarchy. Test
+ * whether all conditions are met.
+ *
+ * Note that the test result may change dynamically on the same inode
+ * depending on how memcg and iocg are configured.
*/
static inline bool inode_cgwb_enabled(struct inode *inode)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
- return bdi_cap_account_dirty(bdi) &&
+ return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
+ cgroup_on_dfl(blkcg_root_css->cgroup) &&
+ bdi_cap_account_dirty(bdi) &&
(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -286,7 +297,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi
* %current's blkcg equals the effective blkcg of its memcg. No
* need to use the relatively expensive cgroup_get_e_css().
*/
- if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
+ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
return wb;
return NULL;
}
@@ -401,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
rcu_read_unlock();
}
-struct wb_iter {
- int start_blkcg_id;
- struct radix_tree_iter tree_iter;
- void **slot;
-};
-
-static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
- struct backing_dev_info *bdi)
-{
- struct radix_tree_iter *titer = &iter->tree_iter;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (iter->start_blkcg_id >= 0) {
- iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
- iter->start_blkcg_id = -1;
- } else {
- iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
- }
-
- if (!iter->slot)
- iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
- if (iter->slot)
- return *iter->slot;
- return NULL;
-}
-
-static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
- struct backing_dev_info *bdi,
- int start_blkcg_id)
-{
- iter->start_blkcg_id = start_blkcg_id;
-
- if (start_blkcg_id)
- return __wb_iter_next(iter, bdi);
- else
- return &bdi->wb;
-}
-
-/**
- * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
- * @wb_cur: cursor struct bdi_writeback pointer
- * @bdi: bdi to walk wb's of
- * @iter: pointer to struct wb_iter to be used as iteration buffer
- * @start_blkcg_id: blkcg ID to start iteration from
- *
- * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
- * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
- * to be used as temp storage during iteration. rcu_read_lock() must be
- * held throughout iteration.
- */
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
- for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
- (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
-
#else /* CONFIG_CGROUP_WRITEBACK */
static inline bool inode_cgwb_enabled(struct inode *inode)
@@ -515,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}
-struct wb_iter {
- int next_id;
-};
-
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
- for ((iter)->next_id = (start_blkcg_id); \
- ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
-
static inline int inode_congested(struct inode *inode, int cong_bits)
{
return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea946e..ed3768f4e 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65..cf038431a 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
- struct irq_domain *irq_domain;
#endif
};
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 9f46b625d..e9f0d062e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -189,17 +189,6 @@ static inline void *bio_data(struct bio *bio)
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
-{
- return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
-}
-
-#define bio_io_error(bio) bio_endio((bio), -EIO)
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -308,6 +297,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
atomic_set(&bio->__bi_cnt, count);
}
+static inline bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+ return (bio->bi_flags & (1U << bit)) != 0;
+}
+
+static inline void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags |= (1U << bit);
+}
+
+static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags &= ~(1U << bit);
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -428,7 +432,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
-extern void bio_endio(struct bio *, int);
+extern void bio_endio(struct bio *);
+
+static inline void bio_io_error(struct bio *bio)
+{
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+}
+
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -442,7 +453,6 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
-extern int bio_get_nr_vecs(struct block_device *);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
@@ -719,7 +729,7 @@ extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
-extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
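
With bio_endio() losing its error argument, completion status now travels in bio->bi_error, and the new bio_io_error() above is just "set -EIO, then complete". A brief sketch of a bi_end_io callback and an error-out path under the new convention (the completion signalling is illustrative):

    /* Sketch only: the single-argument bi_end_io convention. */
    #include <linux/bio.h>
    #include <linux/completion.h>
    #include <linux/printk.h>

    static void example_end_io(struct bio *bio)
    {
        struct completion *done = bio->bi_private;

        if (bio->bi_error)
            pr_err("example I/O failed: %d\n", bio->bi_error);

        complete(done);
        bio_put(bio);
    }

    /* Rejecting a bio no longer passes -EIO to bio_endio(): */
    static void example_reject(struct bio *bio)
    {
        bio_io_error(bio);    /* sets bi_error = -EIO and completes */
    }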
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index ea17cca9e..9653fdb76 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 297f5bda4..e63553386 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
{
int order;
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
-static __inline__ int get_count_order(unsigned int count)
+static inline int get_count_order(unsigned int count)
{
int order;
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
return order;
}
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b62d768c..c02e66994 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,12 +14,15 @@
*/
#include <linux/cgroup.h>
-#include <linux/u64_stats_sync.h>
+#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
+/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
+#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
@@ -45,7 +48,7 @@ struct blkcg {
struct blkcg_gq *blkg_hint;
struct hlist_head blkg_list;
- struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
+ struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -53,14 +56,19 @@ struct blkcg {
#endif
};
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
+ * to carry result values from read and sum operations.
+ */
struct blkg_stat {
- struct u64_stats_sync syncp;
- uint64_t cnt;
+ struct percpu_counter cpu_cnt;
+ atomic64_t aux_cnt;
};
struct blkg_rwstat {
- struct u64_stats_sync syncp;
- uint64_t cnt[BLKG_RWSTAT_NR];
+ struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
+ atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};
/*
@@ -68,32 +76,28 @@ struct blkg_rwstat {
* request_queue (q). This is used by blkcg policies which need to track
* information per blkcg - q pair.
*
- * There can be multiple active blkcg policies and each has its private
- * data on each blkg, the size of which is determined by
- * blkcg_policy->pd_size. blkcg core allocates and frees such areas
- * together with blkg and invokes pd_init/exit_fn() methods.
- *
- * Such private data must embed struct blkg_policy_data (pd) at the
- * beginning and pd_size can't be smaller than pd.
+ * There can be multiple active blkcg policies and each blkg:policy pair is
+ * represented by a blkg_policy_data which is allocated and freed by each
+ * policy's pd_alloc/free_fn() methods. A policy can allocate private data
+ * area by allocating larger data structure which embeds blkg_policy_data
+ * at the beginning.
*/
struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
-
- /* used during policy activation */
- struct list_head alloc_node;
};
/*
- * Policies that need to keep per-blkcg data which is independent
- * from any request_queue associated to it must specify its size
- * with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. cpd_init() is invoked to let
- * each policy handle per-blkcg data.
+ * Policies that need to keep per-blkcg data which is independent from any
+ * request_queue associated to it should implement cpd_alloc/free_fn()
+ * methods. A policy can allocate private data area by allocating larger
+ * data structure which embeds blkcg_policy_data at the beginning.
+ * cpd_init() is invoked to let each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
- /* the policy id this per-policy data belongs to */
+ /* the blkcg and policy id this per-policy data belongs to */
+ struct blkcg *blkcg;
int plid;
};
@@ -123,40 +127,50 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
+
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
};
-typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
-typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
+typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
+typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
struct blkcg_policy {
int plid;
- /* policy specific private data size */
- size_t pd_size;
- /* policy specific per-blkcg data size */
- size_t cpd_size;
/* cgroup files for the policy */
- struct cftype *cftypes;
+ struct cftype *dfl_cftypes;
+ struct cftype *legacy_cftypes;
/* operations */
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
+ blkcg_pol_bind_cpd_fn *cpd_bind_fn;
+
+ blkcg_pol_alloc_pd_fn *pd_alloc_fn;
blkcg_pol_init_pd_fn *pd_init_fn;
blkcg_pol_online_pd_fn *pd_online_fn;
blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
+ struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
+const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
@@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off);
+int blkg_print_stat_bytes(struct seq_file *sf, void *v);
+int blkg_print_stat_ios(struct seq_file *sf, void *v);
+int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
+int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
- int off);
+u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol, int off);
struct blkg_conf_ctx {
struct gendisk *disk;
struct blkcg_gq *blkg;
- u64 v;
+ char *body;
};
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- const char *input, struct blkg_conf_ctx *ctx);
+ char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
@@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
+ return css_to_blkcg(task_css(tsk, io_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
- return task_get_css(task, blkio_cgrp_id);
+ return task_get_css(task, io_cgrp_id);
}
/**
@@ -233,6 +253,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
}
/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is internal version and shouldn't be used by policy
+ * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state. If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and lookup hint is updated on success.
+ */
+static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q,
+ bool update_hint)
+{
+ struct blkcg_gq *blkg;
+
+ if (blkcg == &blkcg_root)
+ return q->root_blkg;
+
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ return blkg_lookup_slowpath(blkcg, q, update_hint);
+}
+
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q, false);
+}
+
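Editorial aside: the fast path above is just a cached-hint lookup -- check blkg_hint first and only take the slow path (which may refresh the hint) on a miss. The standalone C sketch below models that shape only; the names are invented and it deliberately ignores RCU, locking and the queue-bypass check, so it illustrates the hint pattern rather than the kernel code itself.

#include <stdio.h>

struct queue { int id; };

struct group {
        struct queue *q;
        const char *name;
};

struct blkcg_model {
        struct group *hint;            /* last successfully looked-up group */
        struct group *groups[8];       /* toy backing store instead of a radix tree */
        int nr_groups;
};

/* slow path: linear scan, updates the hint on success */
static struct group *lookup_slowpath(struct blkcg_model *cg, struct queue *q)
{
        int i;

        for (i = 0; i < cg->nr_groups; i++) {
                if (cg->groups[i]->q == q) {
                        cg->hint = cg->groups[i];
                        return cg->groups[i];
                }
        }
        return NULL;
}

/* fast path: trust the hint when it matches the queue being asked about */
static struct group *lookup(struct blkcg_model *cg, struct queue *q)
{
        if (cg->hint && cg->hint->q == q)
                return cg->hint;
        return lookup_slowpath(cg, q);
}

int main(void)
{
        struct queue q0 = { 0 }, q1 = { 1 };
        struct group g0 = { &q0, "g0" }, g1 = { &q1, "g1" };
        struct blkcg_model cg = { .groups = { &g0, &g1 }, .nr_groups = 2 };

        printf("%s\n", lookup(&cg, &q1)->name);   /* miss: slow path caches the hint */
        printf("%s\n", lookup(&cg, &q1)->name);   /* hit: served from the hint */
        return 0;
}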
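A real caller pays the linear (or radix-tree) cost only once per blkcg/queue pair as long as the hint stays warm, which is exactly the property the patch relies on when it moves blkg_lookup() inline.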
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
struct blkcg_policy *pol)
{
- return blkcg ? blkcg->pd[pol->plid] : NULL;
+ return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
@@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
return pd ? pd->blkg : NULL;
}
+static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
+{
+ return cpd ? cpd->blkcg : NULL;
+}
+
/**
* blkg_path - format cgroup path of blkg
* @blkg: blkg of interest
@@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
-struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
- bool update_hint);
-
/**
* blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
@@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
* or if either the blkcg or queue is going away. Fall back to
* root_rl in such cases.
*/
- blkg = blkg_lookup_create(blkcg, q);
- if (unlikely(IS_ERR(blkg)))
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg))
goto root_rl;
blkg_get(blkg);
@@ -394,8 +462,7 @@ root_rl:
*/
static inline void blk_put_rl(struct request_list *rl)
{
- /* root_rl may not have blkg set */
- if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ if (rl->blkg->blkcg != &blkcg_root)
blkg_put(rl->blkg);
}
@@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-static inline void blkg_stat_init(struct blkg_stat *stat)
+static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
- u64_stats_init(&stat->syncp);
+ int ret;
+
+ ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
+ if (ret)
+ return ret;
+
+ atomic64_set(&stat->aux_cnt, 0);
+ return 0;
+}
+
+static inline void blkg_stat_exit(struct blkg_stat *stat)
+{
+ percpu_counter_destroy(&stat->cpu_cnt);
}
/**
@@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat)
* @stat: target blkg_stat
* @val: value to add
*
- * Add @val to @stat. The caller is responsible for synchronizing calls to
- * this function.
+ * Add @val to @stat. The caller must ensure that IRQs on the same CPU
+ * don't re-enter this function for the same counter.
*/
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
- u64_stats_update_begin(&stat->syncp);
- stat->cnt += val;
- u64_stats_update_end(&stat->syncp);
+ __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_stat_read - read the current value of a blkg_stat
* @stat: blkg_stat to read
- *
- * Read the current value of @stat. This function can be called without
- * synchroniztion and takes care of u64 atomicity.
*/
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
- unsigned int start;
- uint64_t v;
-
- do {
- start = u64_stats_fetch_begin_irq(&stat->syncp);
- v = stat->cnt;
- } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
-
- return v;
+ return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
@@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
*/
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
- stat->cnt = 0;
+ percpu_counter_set(&stat->cpu_cnt, 0);
+ atomic64_set(&stat->aux_cnt, 0);
}
/**
- * blkg_stat_merge - merge a blkg_stat into another
+ * blkg_stat_add_aux - add a blkg_stat into another's aux count
* @to: the destination blkg_stat
* @from: the source
*
- * Add @from's count to @to.
+ * Add @from's count including the aux one to @to's aux count.
*/
-static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+static inline void blkg_stat_add_aux(struct blkg_stat *to,
+ struct blkg_stat *from)
{
- blkg_stat_add(to, blkg_stat_read(from));
+ atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
+ &to->aux_cnt);
}
-static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
- u64_stats_init(&rwstat->syncp);
+ int i, ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
+ if (ret) {
+ while (--i >= 0)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ return ret;
+ }
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+ return 0;
+}
+
+static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
/**
@@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
int rw, uint64_t val)
{
- u64_stats_update_begin(&rwstat->syncp);
+ struct percpu_counter *cnt;
if (rw & REQ_WRITE)
- rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
- rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
+
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+
if (rw & REQ_SYNC)
- rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
- rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
- u64_stats_update_end(&rwstat->syncp);
+ __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_rwstat_read - read the current values of a blkg_rwstat
* @rwstat: blkg_rwstat to read
*
- * Read the current snapshot of @rwstat and return it as the return value.
- * This function can be called without synchronization and takes care of
- * u64 atomicity.
+ * Read the current snapshot of @rwstat and return it in the aux counts.
*/
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
- unsigned int start;
- struct blkg_rwstat tmp;
-
- do {
- start = u64_stats_fetch_begin_irq(&rwstat->syncp);
- tmp = *rwstat;
- } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
+ struct blkg_rwstat result;
+ int i;
- return tmp;
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_set(&result.aux_cnt[i],
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
+ return result;
}
/**
@@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
- return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+ return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}
/**
@@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
*/
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
- memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
}
/**
- * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
* @to: the destination blkg_rwstat
* @from: the source
*
- * Add @from's counts to @to.
+ * Add @from's count including the aux one to @to's aux count.
*/
-static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
{
struct blkg_rwstat v = blkg_rwstat_read(from);
int i;
- u64_stats_update_begin(&to->syncp);
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- to->cnt[i] += v.cnt[i];
- u64_stats_update_end(&to->syncp);
+ atomic64_add(atomic64_read(&v.aux_cnt[i]) +
+ atomic64_read(&from->aux_cnt[i]),
+ &to->aux_cnt[i]);
+}
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+ struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ bool throtl = false;
+
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ throtl = blk_throtl_bio(q, blkg, bio);
+
+ if (!throtl) {
+ blkg = blkg ?: q->root_blkg;
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+ bio->bi_iter.bi_size);
+ blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+ }
+
+ rcu_read_unlock();
+ return !throtl;
}
#else /* CONFIG_BLK_CGROUP */
@@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+ struct bio *bio) { return true; }
+
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
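Editorial aside: the switch from u64_stats_sync to percpu_counter in the hunks above trades exactness for cheap updates -- each CPU accumulates a bounded local delta (the batch is INT_MAX / 2, so the drift is irrelevant for statistics) and an exact value is obtained by summing everything. The following is a minimal single-threaded model of that batching idea, with hypothetical names, a tiny batch so the flush is visible, and no real per-CPU data:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4
#define BATCH   8      /* the kernel uses INT_MAX / 2; tiny here so the flush is visible */

struct batched_counter {
        int64_t total;                 /* "global" count, updated only on a flush */
        int64_t delta[NR_CPUS];        /* per-CPU drift, bounded by BATCH */
};

static void counter_add(struct batched_counter *c, int cpu, int64_t val)
{
        c->delta[cpu] += val;
        if (c->delta[cpu] >= BATCH || c->delta[cpu] <= -BATCH) {
                c->total += c->delta[cpu];   /* flush the local drift */
                c->delta[cpu] = 0;
        }
}

/* an exact read sums the global total plus every local delta */
static int64_t counter_sum(const struct batched_counter *c)
{
        int64_t v = c->total;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                v += c->delta[cpu];
        return v > 0 ? v : 0;                /* clamp like percpu_counter_sum_positive() */
}

int main(void)
{
        struct batched_counter c = { 0 };
        int i;

        for (i = 0; i < 100; i++)
                counter_add(&c, i % NR_CPUS, 1);
        printf("approx=%lld exact=%lld\n",
               (long long)c.total, (long long)counter_sum(&c));
        return 0;
}

The "approx" value (the flushed total alone) can lag the exact sum by up to BATCH per CPU, which is why blkg_stat_read() goes through the summing helper rather than reading the shared count directly.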
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 37d1602c4..5e7d43ab6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -145,7 +145,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
- void *priv);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 74b4ec5f8..0ceccbc77 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
-typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
@@ -46,7 +46,8 @@ struct bvec_iter {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned long bi_flags; /* status, command, etc */
+ unsigned int bi_flags; /* status, command, etc */
+ int bi_error;
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
@@ -111,17 +112,15 @@ struct bio {
/*
* bio flags
*/
-#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
#define BIO_USER_MAPPED 4 /* contains user pages */
#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
-#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
-#define BIO_TOI 10 /* bio is TuxOnIce submitted */
+#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
+#define BIO_TOI 9 /* bio is TuxOnIce submitted */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -130,14 +129,12 @@ struct bio {
#define BIO_RESET_BITS 14
#define BIO_OWNS_VEC 14 /* bio_free() should free bvec */
-#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
-
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
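Editorial aside: since bi_flags is now a 32-bit field, the pool index lives in the top four bits of a u32 rather than of an unsigned long, which is why BIO_POOL_OFFSET becomes 32 - BIO_POOL_BITS. A self-contained sketch of that packing, with helper names made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define POOL_BITS   4
#define POOL_OFFSET (32 - POOL_BITS)                 /* top 4 bits of a u32 */
#define POOL_MASK   (~0u << POOL_OFFSET)

static uint32_t set_pool_idx(uint32_t flags, unsigned int idx)
{
        return (flags & ~POOL_MASK) | ((uint32_t)idx << POOL_OFFSET);
}

static unsigned int get_pool_idx(uint32_t flags)
{
        return flags >> POOL_OFFSET;                 /* mirrors BIO_POOL_IDX() */
}

int main(void)
{
        uint32_t flags = 0;

        flags |= 1u << 2;                            /* some low flag bit, e.g. "cloned" */
        flags = set_pool_idx(flags, 5);
        printf("pool=%u low_flags=%#x\n",
               get_pool_idx(flags), flags & ~POOL_MASK);
        return 0;
}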
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ccf2b6bf9..19c2e947d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -43,7 +43,7 @@ struct blk_flush_queue;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 3
+#define BLKCG_MAX_POLS 2
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-struct bvec_merge_data {
- struct block_device *bi_bdev;
- sector_t bi_sector;
- unsigned bi_size;
- unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
- struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
+ unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
unsigned int chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
+ unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
- merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
@@ -462,6 +455,9 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
+ struct bio_set *bio_split;
+
+ bool mq_sysfs_init_done;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -486,7 +482,6 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -591,7 +586,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
/*
* Driver can handle struct request, if it either has an old style
@@ -782,6 +777,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+ struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -986,9 +983,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1135,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
+ BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
@@ -1154,6 +1152,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
return q->limits.seg_boundary_mask;
}
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+ return q->limits.virt_boundary_mask;
+}
+
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
return q->limits.max_sectors;
@@ -1354,6 +1357,39 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+ struct bio *next)
+{
+ if (!bio_has_data(prev))
+ return false;
+
+ return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+ next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, bio, req->bio);
+}
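Editorial aside: the gap helpers above boil down to one mask test -- a merge would create an SG gap if the next segment starts at a non-zero offset, or if the previous segment does not end on the queue's virtual boundary. A small userspace model of bvec_gap_to_prev(), with a simplified struct and a hard-coded 4 KiB boundary, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

struct vec { unsigned int offset, len; };

static bool gap_to_prev(const struct vec *prev, unsigned int next_offset,
                        unsigned long boundary_mask)
{
        if (!boundary_mask)                  /* no virt boundary: gaps are fine */
                return false;
        return next_offset ||
               ((prev->offset + prev->len) & boundary_mask);
}

int main(void)
{
        unsigned long mask = 4096 - 1;       /* 4 KiB virtual boundary */
        struct vec a = { .offset = 0, .len = 4096 };
        struct vec b = { .offset = 0, .len = 2048 };

        printf("%d\n", gap_to_prev(&a, 0, mask));    /* 0: previous vec ends on the boundary */
        printf("%d\n", gap_to_prev(&b, 0, mask));    /* 1: previous vec ends mid-page */
        printf("%d\n", gap_to_prev(&a, 512, mask));  /* 1: next vec starts mid-page */
        return 0;
}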
+
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1480,6 +1516,26 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1546,6 +1602,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
return 0;
}
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1555,8 +1621,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t,
- void **, unsigned long *pfn, long size);
+ long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+ unsigned long *pfn);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1640,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
- unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+ void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0..f57d7fed9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/perf_event.h>
struct bpf_map;
@@ -24,6 +25,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+ void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
bool owner_jited;
union {
char value[0] __aligned(8);
- struct bpf_prog *prog[0] __aligned(8);
+ void *ptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 4763ad64e..f89b31d45 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9ebee53d3..397c5cd09 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -46,6 +46,7 @@ struct ceph_options {
unsigned long mount_timeout; /* jiffies */
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
+ unsigned long monc_ping_timeout; /* jiffies */
/*
* any type that can't be simply compared or doesn't need
@@ -66,6 +67,7 @@ struct ceph_options {
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 377532789..b2371d9b5 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -238,6 +238,8 @@ struct ceph_connection {
bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
/* message in temps */
struct ceph_msg_header in_hdr;
@@ -248,6 +250,8 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
+ struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
};
@@ -285,6 +289,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
extern void ceph_con_keepalive(struct ceph_connection *con);
+extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
+ unsigned long interval);
extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
size_t length, size_t alignment);
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 1c1887206..0fe2656ac 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_MSG 7 /* message */
#define CEPH_MSGR_TAG_ACK 8 /* message ack */
#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
-#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
+#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
+#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
+#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
/*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 430c876ad..8492721b3 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -34,12 +34,17 @@ struct seq_file;
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
+#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
+ __unused_tag_ ## _t = CGROUP_ ## _t - 1,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
CGROUP_SUBSYS_COUNT,
};
+#undef SUBSYS_TAG
#undef SUBSYS
+#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
+
/* bits in struct cgroup_subsys_state flags field */
enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@ struct cftype {
* end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
- int private;
+ unsigned long private;
/*
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
@@ -406,7 +411,9 @@ struct cgroup_subsys {
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
- void (*fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task, void **priv_p);
+ void (*cancel_fork)(struct task_struct *task, void *priv);
+ void (*fork)(struct task_struct *task, void *priv);
void (*exit)(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task);
@@ -434,6 +441,9 @@ struct cgroup_subsys {
int id;
const char *name;
+ /* optional, initialized automatically during boot if not set */
+ const char *legacy_name;
+
/* link to parent, protected by cgroup_lock() */
struct cgroup_root *root;
@@ -468,6 +478,7 @@ void cgroup_threadgroup_change_end(struct task_struct *tsk);
#else /* CONFIG_CGROUPS */
+#define CGROUP_CANFORK_COUNT 0
#define CGROUP_SUBSYS_COUNT 0
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a593e2991..eb7ca55f7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -22,6 +22,15 @@
#ifdef CONFIG_CGROUPS
+/*
+ * All weight knobs on the default hierarchy should use the following min,
+ * default and max values. The default value is the logarithmic center of
+ * MIN and MAX and allows 100x to be expressed in both directions.
+ */
+#define CGROUP_WEIGHT_MIN 1
+#define CGROUP_WEIGHT_DFL 100
+#define CGROUP_WEIGHT_MAX 10000
+
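Editorial aside: the default of 100 is the geometric mean of the min and max, which is what gives the symmetric 100x headroom described in the comment above. A trivial standalone check, for illustration only:

#include <assert.h>

int main(void)
{
        /* 100 is the geometric mean of 1 and 10000 ... */
        assert(100 * 100 == 1 * 10000);
        /* ... so the default sits 100x above MIN and 100x below MAX */
        assert(100 / 1 == 10000 / 100);
        return 0;
}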
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -62,7 +71,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_post_fork(struct task_struct *p,
+ void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
int cgroup_init_early(void);
@@ -524,7 +538,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT])
+{ return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
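Editorial aside: the can_fork()/cancel_fork()/fork() trio added above follows a prepare/cancel/commit shape -- reserve before the operation, undo the reservation if a later step fails, and finalize otherwise. The freestanding sketch below traces that control flow with a plain counter standing in for a controller's charge; the names are invented and this is not the cgroup code itself.

#include <stdbool.h>
#include <stdio.h>

static int used, limit = 2;

/* prepare: reserve a slot, fail if the limit is hit */
static bool can_fork(void)
{
        if (used >= limit)
                return false;
        used++;
        return true;
}

/* cancel: undo the reservation when a later fork step fails */
static void cancel_fork(void)
{
        used--;
}

/* commit: the reservation becomes permanent; nothing extra to do here */
static void post_fork(void)
{
}

static int do_fork(bool later_step_fails)
{
        if (!can_fork())
                return -1;                    /* would be -EAGAIN in the kernel */
        if (later_step_fails) {
                cancel_fork();
                return -1;
        }
        post_fork();
        return 0;
}

int main(void)
{
        printf("%d\n", do_fork(false));       /* 0: succeeds, one slot charged */
        printf("%d\n", do_fork(true));        /* -1: later step failed, charge rolled back */
        printf("%d\n", do_fork(false));       /* 0: rollback left room under the limit */
        printf("used=%d\n", used);            /* 2 */
        return 0;
}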
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e4a96fb14..81ad8a047 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
+
+/*
+ * This file *must* be included with SUBSYS() defined.
+ * SUBSYS_TAG() is a noop if undefined.
+ */
+
+#ifndef SUBSYS_TAG
+#define __TMP_SUBSYS_TAG
+#define SUBSYS_TAG(_x)
+#endif
+
#if IS_ENABLED(CONFIG_CPUSETS)
SUBSYS(cpuset)
#endif
@@ -16,7 +27,7 @@ SUBSYS(cpuacct)
#endif
#if IS_ENABLED(CONFIG_BLK_CGROUP)
-SUBSYS(blkio)
+SUBSYS(io)
#endif
#if IS_ENABLED(CONFIG_MEMCG)
@@ -35,6 +46,10 @@ SUBSYS(freezer)
SUBSYS(net_cls)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
+SUBSYS(bfqio)
+#endif
+
#if IS_ENABLED(CONFIG_CGROUP_PERF)
SUBSYS(perf_event)
#endif
@@ -48,11 +63,28 @@ SUBSYS(hugetlb)
#endif
/*
+ * Subsystems that implement the can_fork() family of callbacks.
+ */
+SUBSYS_TAG(CANFORK_START)
+
+#if IS_ENABLED(CONFIG_CGROUP_PIDS)
+SUBSYS(pids)
+#endif
+
+SUBSYS_TAG(CANFORK_END)
+
+/*
* The following subsystems are not supported on the default hierarchy.
*/
#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
SUBSYS(debug)
#endif
+
+#ifdef __TMP_SUBSYS_TAG
+#undef __TMP_SUBSYS_TAG
+#undef SUBSYS_TAG
+#endif
+
/*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
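Editorial aside: the SUBSYS()/SUBSYS_TAG() machinery is an X-macro -- the subsystem list expands differently depending on which macros the includer defines, and the tag trick emits an enum marker without consuming a slot, so CANFORK_END - CANFORK_START counts only the subsystems placed between the tags. A compact standalone imitation, with the toy list inlined instead of a separate header and all names made up:

#include <stdio.h>

/* stand-in for include/linux/cgroup_subsys.h: a list the includer interprets */
#define SUBSYS_LIST               \
        SUBSYS(cpuset)            \
        SUBSYS(cpu)               \
        SUBSYS_TAG(CANFORK_START) \
        SUBSYS(pids)              \
        SUBSYS_TAG(CANFORK_END)

/* expansion 1: build the id enum; a tag emits a marker without using a slot */
#define SUBSYS(_x)     _x ## _id,
#define SUBSYS_TAG(_t) TAG_ ## _t, __unused_ ## _t = TAG_ ## _t - 1,
enum subsys_id {
        SUBSYS_LIST
        SUBSYS_COUNT,
};
#undef SUBSYS
#undef SUBSYS_TAG

#define CANFORK_COUNT (TAG_CANFORK_END - TAG_CANFORK_START)

/* expansion 2: build a name table from the very same list */
#define SUBSYS(_x)     #_x,
#define SUBSYS_TAG(_t) /* tags contribute nothing to the table */
static const char *subsys_name[] = { SUBSYS_LIST };
#undef SUBSYS
#undef SUBSYS_TAG

int main(void)
{
        /* prints: count=3 canfork=1 first=cpuset */
        printf("count=%d canfork=%d first=%s\n",
               (int)SUBSYS_COUNT, (int)CANFORK_COUNT, subsys_name[cpuset_id]);
        return 0;
}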
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 78842f46f..3ecc07d0d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,11 +32,34 @@
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
+struct clk;
struct clk_hw;
struct clk_core;
struct dentry;
/**
+ * struct clk_rate_request - Structure encoding the clk constraints that
+ * a clock user might require.
+ *
+ * @rate: Requested clock rate. This field will be adjusted by
+ * clock drivers according to hardware capabilities.
+ * @min_rate: Minimum rate imposed by clk users.
+ * @max_rate: Maximum rate imposed by clk users.
+ * @best_parent_rate: The best parent rate a parent can provide to fulfill the
+ * requested constraints.
+ * @best_parent_hw: The most appropriate parent clock that fulfills the
+ * requested constraints.
+ *
+ */
+struct clk_rate_request {
+ unsigned long rate;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned long best_parent_rate;
+ struct clk_hw *best_parent_hw;
+};
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -176,12 +198,8 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
- long (*determine_rate)(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw);
+ int (*determine_rate)(struct clk_hw *hw,
+ struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@ struct clk_div_table {
* to the closest integer instead of the up one.
* CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
* not be changed by the clock framework.
+ * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
+ * except when the value read from the register is zero, the divisor is
+ * 2^width of the field.
*/
struct clk_divider {
struct clk_hw hw;
@@ -360,6 +381,7 @@ struct clk_divider {
#define CLK_DIVIDER_HIWORD_MASK BIT(3)
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
+#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
@@ -550,6 +572,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
void of_gpio_clk_gate_setup(struct device_node *node);
/**
+ * struct clk_gpio_mux - gpio controlled clock multiplexer
+ *
+ * @hw: see struct clk_gpio
+ * @gpiod: gpio descriptor to select the parent of this clock multiplexer
+ *
+ * Clock with a gpio control for selecting the parent clock.
+ * Implements .get_parent, .set_parent and .determine_rate
+ */
+
+extern const struct clk_ops clk_gpio_mux_ops;
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags);
+
+void of_gpio_mux_clk_setup(struct device_node *node);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -568,31 +607,27 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
const char *__clk_get_name(struct clk *clk);
+const char *clk_hw_get_name(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
-u8 __clk_get_num_parents(struct clk *clk);
-struct clk *__clk_get_parent(struct clk *clk);
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
+ unsigned int index);
unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned long __clk_get_rate(struct clk *clk);
+unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
-bool __clk_is_prepared(struct clk *clk);
+unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+bool clk_hw_is_prepared(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
-unsigned long __clk_determine_rate(struct clk_hw *core,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate);
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p);
+int __clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ unsigned long max_rate);
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
@@ -603,7 +638,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
/*
* FIXME clock api without lock protection
*/
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
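Editorial aside: folding the old five-argument determine_rate() signature into struct clk_rate_request lets a callback adjust the request in place and return only an error code. The sketch below mimics that calling convention with mocked-up types and an invented divider-style rounding policy; it is not the clk framework API itself.

#include <stdio.h>

struct clk_rate_request_model {
        unsigned long rate;            /* in: wanted rate, out: what the clock can do */
        unsigned long min_rate;
        unsigned long max_rate;
        unsigned long best_parent_rate;
};

/* a divider-style determine_rate: parent / N, clamped to the requested window */
static int determine_rate(struct clk_rate_request_model *req)
{
        unsigned long best = 0, parent = req->best_parent_rate;
        unsigned int div;

        for (div = 1; div <= 16; div++) {
                unsigned long r = parent / div;

                if (r < req->min_rate || r > req->max_rate)
                        continue;
                if (r <= req->rate && r > best)
                        best = r;
        }
        if (!best)
                return -1;             /* -EINVAL in kernel terms */
        req->rate = best;              /* the request is adjusted in place */
        return 0;
}

int main(void)
{
        struct clk_rate_request_model req = {
                .rate = 50000000, .min_rate = 0, .max_rate = ~0UL,
                .best_parent_rate = 96000000,
        };

        if (!determine_rate(&req))
                printf("granted %lu Hz\n", req.rate);   /* 48000000 */
        return 0;
}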
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index f3050e15f..e0c362363 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
struct device_node;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 63a8159c4..cb19cc186 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
#include <linux/types.h>
+struct device;
+struct device_node;
+struct generic_pm_domain;
+
void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+void cpg_mstp_add_clk_domain(struct device_node *np);
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#else
+static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#endif
+
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 19c4208f4..57bf7aab4 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
#ifndef __LINUX_CLK_TEGRA_H_
#define __LINUX_CLK_TEGRA_H_
-#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/bug.h>
/*
* Tegra CPU clock and reset control ops
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 79b76e13d..223be696d 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -188,33 +188,6 @@ struct clk_hw_omap {
/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE 0x1
-/* Composite clock component types */
-enum {
- CLK_COMPONENT_TYPE_GATE = 0,
- CLK_COMPONENT_TYPE_DIVIDER,
- CLK_COMPONENT_TYPE_MUX,
- CLK_COMPONENT_TYPE_MAX,
-};
-
-/**
- * struct ti_dt_clk - OMAP DT clock alias declarations
- * @lk: clock lookup definition
- * @node_name: clock DT node to map to
- */
-struct ti_dt_clk {
- struct clk_lookup lk;
- char *node_name;
-};
-
-#define DT_CLK(dev, con, name) \
- { \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- }, \
- .node_name = name, \
- }
-
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
CLK_MAX_MEMMAPS
};
-typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
-
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
};
/**
- * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clkdm_clk_enable: pointer to clockdomain enable function
+ * @clkdm_clk_disable: pointer to clockdomain disable function
+ * @cm_wait_module_ready: pointer to CM module wait ready function
+ * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
*
- * Low-level register access ops are generally used by the basic clock types
- * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
- * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
- * used by other hardware-specific clock drivers if needed.
+ * Low-level ops are generally used by the basic clock types (clk-gate,
+ * clk-mux, clk-divider etc.) to provide support for various low-level
+ * hardware interfaces (direct MMIO, regmap etc.), and are initialized
+ * by board code. Low-level ops also contain some other platform-specific
+ * operations not provided directly by clock drivers.
*/
struct ti_clk_ll_ops {
u32 (*clk_readl)(void __iomem *reg);
void (*clk_writel)(u32 val, void __iomem *reg);
+ int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
+ int (*clkdm_clk_disable)(struct clockdomain *clkdm,
+ struct clk *clk);
+ int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
+ u8 idlest_shift);
+ int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+ u8 *idlest_reg_id);
};
-extern struct ti_clk_ll_ops *ti_clk_ll_ops;
-
-extern const struct clk_ops ti_clk_divider_ops;
-extern const struct clk_ops ti_clk_mux_ops;
-
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
-int omap3_noncore_dpll_enable(struct clk_hw *hw);
-void omap3_noncore_dpll_disable(struct clk_hw *hw);
-int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate,
- u8 index);
-long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
-long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk);
-u8 omap2_init_dpll_parent(struct clk_hw *hw);
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
void omap2_init_clk_clkdm(struct clk_hw *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
- unsigned long parent_rate);
-int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate);
-long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
-int omap2_clkops_enable_clkdm(struct clk_hw *hw);
-void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
-int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
- unsigned long parent_rate);
-int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate, u8 index);
-int omap2_dflt_clk_enable(struct clk_hw *hw);
-void omap2_dflt_clk_disable(struct clk_hw *hw);
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
-void omap3_clk_lock_dpll5(void);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
unsigned long parent_rate);
int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
void omap2xxx_clkt_vps_init(void);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
-void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
-void ti_dt_clocks_register(struct ti_dt_clk *oclks);
-void ti_dt_clk_init_provider(struct device_node *np, int index);
void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
-int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
- ti_of_clk_init_cb_t func);
-int of_ti_clk_autoidle_setup(struct device_node *node);
-int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
+
+struct regmap;
+
+int omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem);
+void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
int omap3430_dt_clk_init(void);
int omap3630_dt_clk_init(void);
int am35xx_dt_clk_init(void);
-int ti81xx_dt_clk_init(void);
+int dm814x_dt_clk_init(void);
+int dm816x_dt_clk_init(void);
int omap4xxx_dt_clk_init(void);
int omap5xxx_dt_clk_init(void);
int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
int omap2420_dt_clk_init(void);
int omap2430_dt_clk_init(void);
-#ifdef CONFIG_OF
-void of_ti_clk_allow_autoidle_all(void);
-void of_ti_clk_deny_autoidle_all(void);
-#else
-static inline void of_ti_clk_allow_autoidle_all(void) { }
-static inline void of_ti_clk_deny_autoidle_all(void) { }
-#endif
+struct ti_clk_features {
+ u32 flags;
+ long fint_min;
+ long fint_max;
+ long fint_band1_max;
+ long fint_band2_min;
+ u8 dpll_bypass_vals;
+ u8 cm_idlest_val;
+};
+
+#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
+#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
+#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
+
+void ti_clk_setup_features(struct ti_clk_features *features);
+const struct ti_clk_features *ti_clk_get_features(void);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
-extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
-extern const struct clk_hw_omap_ops clkhwops_iclk;
-extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
#ifdef CONFIG_ATAGS
int omap3430_clk_legacy_init(void);
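Editorial aside: ti_clk_ll_ops gathers the platform's register-access and clockdomain callbacks into a single table that the generic clock code calls through, installed once via ti_clk_setup_ll_ops(). A bare-bones imitation of that ops-injection pattern, with a fake register file and made-up names:

#include <stdint.h>
#include <stdio.h>

struct ll_ops {
        uint32_t (*readl)(volatile void *reg);
        void     (*writel)(uint32_t val, volatile void *reg);
};

static const struct ll_ops *ll_ops;           /* installed once by "board code" */

static int setup_ll_ops(const struct ll_ops *ops)
{
        if (ll_ops)
                return -1;                    /* refuse a second registration */
        ll_ops = ops;
        return 0;
}

/* generic "clock driver" code only ever goes through the installed table */
static void gate_enable(volatile uint32_t *reg, uint32_t bit)
{
        ll_ops->writel(ll_ops->readl(reg) | bit, reg);
}

/* one possible backend: direct access to a fake register file */
static uint32_t direct_readl(volatile void *reg)
{
        return *(volatile uint32_t *)reg;
}

static void direct_writel(uint32_t val, volatile void *reg)
{
        *(volatile uint32_t *)reg = val;
}

static const struct ll_ops direct_ops = { direct_readl, direct_writel };

int main(void)
{
        static volatile uint32_t regs[4];     /* pretend MMIO window */

        setup_ll_ops(&direct_ops);
        gate_enable(&regs[1], 1u << 8);
        printf("reg1=%#x\n", (unsigned)regs[1]);   /* 0x100 */
        return 0;
}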
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 597a1e836..bdcf358df 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -18,15 +18,6 @@
struct clock_event_device;
struct module;
-/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
-enum clock_event_mode {
- CLOCK_EVT_MODE_UNUSED,
- CLOCK_EVT_MODE_SHUTDOWN,
- CLOCK_EVT_MODE_PERIODIC,
- CLOCK_EVT_MODE_ONESHOT,
- CLOCK_EVT_MODE_RESUME,
-};
-
/*
* Possible states of a clock event device.
*
@@ -86,16 +77,14 @@ enum clock_event_state {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
- * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
* @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
- * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
- * @set_state_periodic: switch state to periodic, if !set_mode
- * @set_state_oneshot: switch state to oneshot, if !set_mode
- * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
- * @set_state_shutdown: switch state to shutdown, if !set_mode
- * @tick_resume: resume clkevt device, if !set_mode
+ * @set_state_periodic: switch state to periodic
+ * @set_state_oneshot: switch state to oneshot
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped
+ * @set_state_shutdown: switch state to shutdown
+ * @tick_resume: resume clkevt device
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
u64 min_delta_ns;
u32 mult;
u32 shift;
- enum clock_event_mode mode;
enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
- /*
- * State transition callback(s): Only one of the two groups should be
- * defined:
- * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
- * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
- */
- void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
int (*set_state_oneshot_stopped)(struct clock_event_device *);
@@ -234,13 +215,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
-extern int clockevents_notify(unsigned long reason, void *arg);
-
#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
diff --git a/include/linux/cma.h b/include/linux/cma.h
index f7ef093ec..29f9e774a 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3e9..8efb40e61 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,12 +237,25 @@
#define KASAN_ABI_VERSION 3
#endif
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
/*
* A trick to suppress uninitialized variable warning without generating any
* code
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae7c..3d7810341 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -198,20 +198,46 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#include <uapi/linux/types.h>
-static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+#define __READ_ONCE_SIZE \
+({ \
+ switch (size) { \
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
+ default: \
+ barrier(); \
+ __builtin_memcpy((void *)res, (const void *)p, size); \
+ barrier(); \
+ } \
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
{
- switch (size) {
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
- default:
- barrier();
- __builtin_memcpy((void *)res, (const void *)p, size);
- barrier();
- }
+ __READ_ONCE_SIZE;
}
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+ __READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+ __READ_ONCE_SIZE;
+}
+#endif
+
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
@@ -248,11 +274,30 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering.
*/
-#define READ_ONCE(x) \
- ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+#define __READ_ONCE(x, check) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u; \
+ if (check) \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ else \
+ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
#define WRITE_ONCE(x, val) \
- ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
/**
* READ_ONCE_CTRL - Read a value heading a control dependency
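Usage sketch (not part of this patch): the point of READ_ONCE_NOCHECK() is that the load is routed through __read_once_size_nocheck() and therefore stays invisible to KASAN. The helper name below is invented; a typical caller would be something like a stack walker reading words that may be racy or technically out of bounds.

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical helper: read a possibly racy word without a KASAN report */
static inline unsigned long peek_word_nocheck(const unsigned long *addr)
{
        return READ_ONCE_NOCHECK(*addr);
}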
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b96bd2999..008fc67d0 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -49,13 +49,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
}
}
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static inline enum ctx_state ct_state(void)
+{
+ return context_tracking_is_enabled() ?
+ this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
+static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
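A hedged sketch of how the new ct_state()/CT_WARN_ON() helpers might be used on a kernel-entry path; the wrapper name is invented, and the check only fires when context tracking is enabled.

#include <linux/context_tracking.h>

static inline void assert_entered_kernel_context(void)
{
        /* Expands to a WARN_ON() only if context tracking is enabled */
        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
}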
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 678ecdf90..ee956c528 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -14,6 +14,7 @@ struct context_tracking {
bool active;
int recursion;
enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 3486b9082..c69e1b932 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/sched.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
+#ifdef CONFIG_PID_NS
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid)
+{
+ struct task_struct *task = NULL;
+ unsigned long pid = 0;
+
+ rcu_read_lock();
+ task = find_task_by_vpid(vpid);
+ if (task)
+ pid = task_pid_nr(task);
+ rcu_read_unlock();
+
+ return pid;
+}
+#else
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
+#endif
+
#endif
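Illustrative use of the new coresight_vpid_to_pid() helper, e.g. when a PID supplied from user space inside a PID namespace must be translated before being programmed into trace-filter hardware; the surrounding function is an assumption, not part of the patch.

#include <linux/coresight.h>

static unsigned long coresight_filter_pid(unsigned long user_vpid)
{
        /* With CONFIG_PID_NS=n this simply returns user_vpid */
        return coresight_vpid_to_pid(user_vpid);
}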
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index c4d4eb8ac..986c06c88 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpufeature.h>
@@ -43,16 +44,16 @@
* For a list of legal values for 'feature', please consult the file
* 'asm/cpufeature.h' of your favorite architecture.
*/
-#define module_cpu_feature_match(x, __init) \
+#define module_cpu_feature_match(x, __initfunc) \
static struct cpu_feature const cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
-static int cpu_feature_match_ ## x ## _init(void) \
+static int __init cpu_feature_match_ ## x ## _init(void) \
{ \
if (!cpu_have_feature(cpu_feature(x))) \
return -ENODEV; \
- return __init(); \
+ return __initfunc(); \
} \
module_init(cpu_feature_match_ ## x ## _init)
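A sketch of a module using the fixed macro. "XYZ" and crypto_mod_init() are placeholders; the point of the rename is that the passed-in function can itself be marked __init, since the macro parameter no longer shadows the __init keyword.

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init crypto_mod_init(void)
{
        /* probe and register the accelerated implementation here */
        return 0;
}

module_cpu_feature_match(XYZ, crypto_mod_init);

MODULE_LICENSE("GPL");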
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e567b..dca22de98 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
- unsigned int policy; /* see above */
- struct cpufreq_governor *governor; /* see below */
};
struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_real_policy user_policy;
+ struct cpufreq_user_policy user_policy;
struct cpufreq_frequency_table *freq_table;
struct list_head policy_list;
@@ -129,9 +127,14 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
+static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ return NULL;
+}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
@@ -369,11 +372,10 @@ static inline void cpufreq_resume(void) {}
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
-#define CPUFREQ_CREATE_POLICY (4)
-#define CPUFREQ_REMOVE_POLICY (5)
+#define CPUFREQ_NOTIFY (1)
+#define CPUFREQ_START (2)
+#define CPUFREQ_CREATE_POLICY (3)
+#define CPUFREQ_REMOVE_POLICY (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +580,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_supported(void);
int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -591,12 +595,23 @@ static inline int cpufreq_boost_enabled(void)
{
return 0;
}
+
+static inline int cpufreq_enable_boost_support(void)
+{
+ return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ return false;
+}
#endif
/* the following funtion is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
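A rough, hypothetical driver-side sketch of the new boost hooks: if the frequency table carries boost entries, advertise boost support and expose the new scaling_boost_freqs attribute. The init callback and attribute array names are illustrative only.

#include <linux/cpufreq.h>

static struct freq_attr *my_cpufreq_attrs[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_scaling_boost_freqs,
        NULL,
};

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Turbo/boost frequencies present in the table? */
        if (policy_has_boost_freq(policy))
                return cpufreq_enable_boost_support();

        return 0;
}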
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d3427..786ad3263 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
struct list_head device_list;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
- int safe_state_index;
cpumask_t coupled_cpus;
struct cpuidle_coupled *coupled;
#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083e6..8d70e1361 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@ struct cred {
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
+ kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
}
#endif
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+ return cap_issubset(cred->cap_ambient,
+ cap_intersect(cred->cap_permitted,
+ cred->cap_inheritable));
+}
+
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
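A hedged example (not from this patch) of the kind of check the new helper enables before committing prepared credentials; abort_creds() and commit_creds() are the existing cred API, the wrapper itself is invented.

#include <linux/cred.h>
#include <linux/errno.h>

static int commit_creds_checked(struct cred *new)
{
        /* cap_ambient must stay within cap_permitted & cap_inheritable */
        if (!cap_ambient_invariant_ok(new)) {
                abort_creds(new);
                return -EINVAL;
        }

        return commit_creds(new);
}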
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938b0..e71cb70a1 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
- * Temporary flag used to prevent legacy AEAD implementations from
- * being used by user-space.
- */
-#define CRYPTO_ALG_AEAD_NEW 0x00004000
-
-/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -142,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
-struct aead_request;
-struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@ struct ablkcipher_alg {
};
/**
- * struct old_aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- * transformation. A transformation may support smaller tag sizes.
- * As the authentication tag is a message digest to ensure the
- * integrity of the encrypted data, a consumer typically wants the
- * largest authentication tag possible as defined by this
- * variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- * function is used to specify the consumer requested size of the
- * authentication tag to be either generated by the transformation
- * during encryption or the size of the authentication tag to be
- * supplied during the decryption operation. This function is also
- * responsible for checking the authentication tag size for
- * validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct old_aead_alg {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int ivsize;
- unsigned int maxauthsize;
-};
-
-/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
* @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
#define cra_ablkcipher cra_u.ablkcipher
-#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
diff --git a/include/linux/dax.h b/include/linux/dax.h
new file mode 100644
index 000000000..b415e5215
--- /dev/null
+++ b/include/linux/dax.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_DAX_H
+#define _LINUX_DAX_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned int flags, get_block_t, dax_iodone_t);
+#else
+static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, unsigned int flags, get_block_t gb,
+ dax_iodone_t di)
+{
+ return VM_FAULT_FALLBACK;
+}
+#define __dax_pmd_fault dax_pmd_fault
+#endif
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
+
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+ return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
+#endif
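An illustrative filesystem-side sketch of the relocated DAX API: a ->fault handler that routes DAX-backed VMAs through dax_fault(). my_get_block() is a placeholder get_block_t and passing NULL for the completion callback is an assumption.

#include <linux/dax.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

/* Placeholder for the filesystem's real block-mapping callback */
static int my_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh, int create)
{
        return -EIO; /* a real implementation maps iblock here */
}

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        if (vma_is_dax(vma))
                return dax_fault(vma, vmf, my_get_block, NULL);

        return VM_FAULT_SIGBUS;
}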
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 420311bce..9beb636b9 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
bool debugfs_initialized(void);
+ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
#else
#include <linux/err.h>
@@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline ssize_t debugfs_read_file_bool(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index ce447f0f1..68030e22a 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -65,7 +65,10 @@ struct devfreq_dev_status {
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
* @get_dev_status: The device should provide the current performance
- * status to devfreq, which is used by governors.
+ * status to devfreq. Governors should not use this
+ * directly; instead, they are recommended to use
+ * devfreq_update_stats() along with devfreq.last_status.
* @get_cur_freq: The device should provide the current frequency
* at which it is operating.
* @exit: An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
struct delayed_work work;
unsigned long previous_freq;
+ struct devfreq_dev_status last_status;
void *data; /* private data for governors */
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
+/**
+ * devfreq_update_stats() - update the last_status pointer in struct devfreq
+ * @df: the devfreq instance whose status needs updating
+ *
+ * Governors are recommended to use this function along with last_status,
+ * which allows other entities to reuse the last_status without affecting
+ * the values fetched later by governors.
+ */
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
}
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */
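A hypothetical governor fragment showing the intended pattern: refresh last_status via devfreq_update_stats() and base the decision on the cached numbers. The load threshold and the function name are invented.

#include <linux/devfreq.h>
#include <linux/kernel.h>

static int my_governor_get_target(struct devfreq *df, unsigned long *freq)
{
        int err = devfreq_update_stats(df);

        if (err)
                return err;

        /* Invented heuristic: request max frequency above ~50% load */
        if (df->last_status.busy_time * 2 > df->last_status.total_time)
                *freq = UINT_MAX;
        else
                *freq = df->last_status.current_frequency;

        return 0;
}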
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51cc1deb7..76d23fa8c 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
unsigned long arg);
-typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size);
-
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_ioctl_fn ioctl;
- dm_merge_fn merge;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
diff --git a/include/linux/device.h b/include/linux/device.h
index a2b4ea70a..5d7bc6349 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,7 +341,7 @@ struct subsys_interface {
struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
- int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+ void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};
int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@ struct device_dma_parameters {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_list: Hosts MSI descriptors
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain;
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct list_head msi_list;
+#endif
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@@ -861,6 +869,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
@@ -959,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
+extern int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);
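A sketch of the new per-device MSI-domain accessors from a bus driver's point of view; the function is illustrative, and with CONFIG_GENERIC_MSI_IRQ_DOMAIN disabled both helpers compile away to a no-op/NULL.

#include <linux/device.h>
#include <linux/irqdomain.h>

static void my_bus_set_msi_domain(struct device *dev, struct irq_domain *d)
{
        dev_set_msi_domain(dev, d);

        if (!dev_get_msi_domain(dev))
                dev_warn(dev, "device has no MSI irqdomain\n");
}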
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 569bbd039..fec734df1 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
return ret;
}
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order)
{
return NULL;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb419..7ea9184ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@ enum dma_transaction_type {
DMA_XOR_VAL,
DMA_PQ_VAL,
DMA_MEMSET,
+ DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -183,6 +184,8 @@ struct dma_interleaved_template {
* operation it continues the calculation with new sources
* @DMA_PREP_FENCE - tell the driver that subsequent operations depend
* on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ * cleared or freed
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@ enum dma_ctrl_flags {
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
};
/**
@@ -400,6 +404,8 @@ enum dma_residue_granularity {
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -408,6 +414,7 @@ struct dma_slave_caps {
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@ struct dma_async_tx_descriptor {
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@ struct dma_tx_state {
};
/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@ struct dma_tx_state {
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
* @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@ struct dma_device {
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
- u8 copy_align;
- u8 xor_align;
- u8 pq_align;
- u8 fill_align;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -682,6 +705,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
return desc->tx_submit(desc);
}
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -1155,6 +1182,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+
+ dma_get_slave_caps(tx->chan, &caps);
+
+ if (caps.descriptor_reuse) {
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+ } else {
+ return -EPERM;
+ }
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (dmaengine_desc_test_reuse(desc))
+ return desc->desc_free(desc);
+ else
+ return -EPERM;
+}
+
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
- struct device *dev, char *name)
+ struct device *dev, const char *name)
{
struct dma_chan *chan;
@@ -1177,6 +1237,9 @@ static inline struct dma_chan
if (chan)
return chan;
+ if (!fn || !fn_param)
+ return NULL;
+
return __dma_request_channel(mask, fn, fn_param);
}
#endif /* DMAENGINE_H */
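A hedged sketch of the descriptor-reuse flow these helpers enable: mark a prepared descriptor as reusable (only succeeds if the channel advertises descriptor_reuse), resubmit it as needed, and release it explicitly with dmaengine_desc_free() once done. Function names and error handling are illustrative.

#include <linux/dmaengine.h>
#include <linux/printk.h>

static int submit_reusable(struct dma_async_tx_descriptor *desc)
{
        int ret = dmaengine_desc_set_reuse(desc);

        if (ret)
                return ret;     /* channel cannot reuse descriptors */

        dmaengine_submit(desc);
        dma_async_issue_pending(desc->chan);

        /* ... the same desc may be resubmitted again later ... */

        return 0;
}

static void release_reusable(struct dma_async_tx_descriptor *desc)
{
        /* Returns -EPERM if the descriptor was never marked reusable */
        if (dmaengine_desc_free(desc))
                pr_warn("dma: descriptor not reusable, not freed\n");
}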
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e1043f791..53ba73750 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
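A minimal illustration of the new dma_pool_zalloc() wrapper; "pool" is assumed to have been created with dma_pool_create() elsewhere.

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *alloc_cleared_desc(struct dma_pool *pool, dma_addr_t *dma)
{
        /* Same as dma_pool_alloc(), but the block comes back zeroed */
        return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}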
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f8775..eb049c622 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
- ((a[2] ^ b[2]) & m)) == 0;
+ (__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b16d929fa..c0f8c4fc5 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -27,8 +27,6 @@
#define __LINUX_EXTCON_H__
#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/sysfs.h>
/*
* Define the unique id of supported external connectors
@@ -77,8 +75,6 @@ struct extcon_cable;
* be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_state: An optional callback to override the method to print the
- * status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
@@ -102,9 +98,6 @@ struct extcon_dev {
const unsigned int *supported_cable;
const u32 *mutually_exclusive;
- /* Optional callbacks to override class functions */
- ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
-
/* Internal data. Please do not set. */
struct device dev;
struct raw_notifier_head *nh;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a21..25c6324a0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK 214
-
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+/*
+ * space utilization of regular dentry and inline dentry
+ * regular dentry inline dentry
+ * bitmap 1 * 27 = 27 1 * 23 = 23
+ * reserved 1 * 3 = 3 1 * 7 = 7
+ * dentry 11 * 214 = 2354 11 * 182 = 2002
+ * filename 8 * 214 = 1712 8 * 182 = 1456
+ * total 4096 3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 043f3283b..bc9afa74e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -788,7 +788,7 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb887406..674e3e226 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
{
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+ !lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
return __fcheck_files(files, fd);
}
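Usage reminder (not part of the patch): the locking contract of fcheck_files() is unchanged, only the assertion mechanism moved to RCU_LOCKDEP_WARN(). Call it under rcu_read_lock() or with files->file_lock held, otherwise the warning fires. A trivial, hedged example:

#include <linux/fdtable.h>
#include <linux/rcupdate.h>

static bool fd_is_open(struct files_struct *files, unsigned int fd)
{
        bool open;

        rcu_read_lock();
        open = fcheck_files(files, fd) != NULL;
        rcu_read_unlock();

        return open;
}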
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea..fa2cab985 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
offsetof(struct bpf_prog, insns[proglen]));
}
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+ /* When classic BPF programs have been loaded and the arch
+ * does not have a classic BPF JIT (anymore), they have been
+ * converted via bpf_migrate_filter() to eBPF and thus always
+ * have an unspec program type.
+ */
+ return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
- flen, proglen, pass, image);
+ pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ proglen, pass, image, current->comm, task_pid_nr(current));
+
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 91dcc6100..31a031e9f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,7 +1,6 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
-
#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/kdev_t.h>
@@ -30,6 +29,8 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
+#include <linux/workqueue.h>
+#include <linux/percpu-rwsem.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -51,7 +52,6 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -636,7 +636,7 @@ struct inode {
unsigned long dirtied_time_when;
struct hlist_node i_hash;
- struct list_head i_wb_list; /* backing dev IO list */
+ struct list_head i_io_list; /* backing dev IO list */
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *i_wb; /* the associated cgroup wb */
@@ -943,12 +943,18 @@ struct lock_manager_operations {
struct lock_manager {
struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
};
struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(struct net *);
+int opens_in_grace(struct net *);
/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1262,6 +1268,7 @@ extern struct list_head super_blocks;
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
/* Possible states of 'frozen' field */
enum {
@@ -1276,16 +1283,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- /* Counters for counting writers at each level */
- struct percpu_counter counter[SB_FREEZE_LEVELS];
- wait_queue_head_t wait; /* queue for waiting for
- writers / faults to finish */
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* queue for waiting for
- sb to be thawed */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map lock_map[SB_FREEZE_LEVELS];
-#endif
+ int frozen; /* Is sb frozen? */
+ wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct super_block {
@@ -1311,7 +1311,6 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
- struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1377,11 +1376,18 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1393,6 +1399,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
void __sb_end_write(struct super_block *sb, int level);
int __sb_start_write(struct super_block *sb, int level, bool wait);
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1613,7 +1624,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -2625,7 +2635,7 @@ static inline void insert_inode_hash(struct inode *inode)
extern void __remove_inode_hash(struct inode *);
static inline void remove_inode_hash(struct inode *inode)
{
- if (!inode_unhashed(inode))
+ if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
@@ -2684,19 +2694,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
- get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
-
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
@@ -3058,4 +3055,6 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
+extern bool path_noexec(const struct path *path);
+
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 2a2f56b29..f29129141 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -20,11 +20,6 @@
#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI
PHY CLK to become stable - 10ms*/
#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
-#define FSL_USB_VER_OLD 0
-#define FSL_USB_VER_1_6 1
-#define FSL_USB_VER_2_2 2
-#define FSL_USB_VER_2_4 3
-#define FSL_USB_VER_2_5 4
#include <linux/types.h>
@@ -52,6 +47,15 @@
*
*/
+enum fsl_usb2_controller_ver {
+ FSL_USB_VER_NONE = -1,
+ FSL_USB_VER_OLD = 0,
+ FSL_USB_VER_1_6 = 1,
+ FSL_USB_VER_2_2 = 2,
+ FSL_USB_VER_2_4 = 3,
+ FSL_USB_VER_2_5 = 4,
+};
+
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@ enum fsl_usb2_phy_modes {
FSL_USB2_PHY_UTMI,
FSL_USB2_PHY_UTMI_WIDE,
FSL_USB2_PHY_SERIAL,
+ FSL_USB2_PHY_UTMI_DUAL,
};
struct clk;
@@ -72,7 +77,7 @@ struct platform_device;
struct fsl_usb2_platform_data {
/* board specific information */
- int controller_ver;
+ enum fsl_usb2_controller_ver controller_ver;
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
@@ -93,6 +98,9 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_a005275:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321eab..0023088b2 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
+ bool little_endian;
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+static inline u32 ifc_in32(void __iomem *addr)
+{
+ u32 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread32(addr);
+ else
+ val = ioread32be(addr);
+
+ return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+ u16 val;
+
+ if (fsl_ifc_ctrl_dev->little_endian)
+ val = ioread16(addr);
+ else
+ val = ioread16be(addr);
+
+ return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+ if (fsl_ifc_ctrl_dev->little_endian)
+ iowrite16(val, addr);
+ else
+ iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+ iowrite8(val, addr);
+}
#endif /* __ASM_FSL_IFC_H */
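A sketch of the accessor pattern these helpers introduce: drivers go through ifc_in32()/ifc_out32() so the same code works on both big- and little-endian IFC instances. The register used here (the NAND event status register) and the helper name are only an example.

#include <linux/fsl_ifc.h>

static void ifc_ack_nand_events(struct fsl_ifc_regs __iomem *regs)
{
        u32 stat = ifc_in32(&regs->ifc_nand.nand_evter_stat);

        /* write-1-to-clear, in the byte order the controller expects */
        ifc_out32(stat, &regs->ifc_nand.nand_evter_stat);
}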
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517dd3..533c44085 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
- * a mark is simply an object attached to an in core inode which allows an
+ * A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
- * these are flushed when an inode is evicted from core and may be flushed
- * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
- * (such as dnotify) will flush these when the open fd is closed and not at
- * inode eviction or modification.
+ * These are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify
+ * users (such as dnotify) will flush these when the open fd is closed and not
+ * at inode eviction or modification.
+ *
+ * Text in brackets is showing the lock(s) protecting modifications of a
+ * particular entry. obj_lock means either inode->i_lock or
+ * mnt->mnt_root->d_lock depending on the mark type.
*/
struct fsnotify_mark {
- __u32 mask; /* mask this mark is for */
- /* we hold ref for each i_list and g_list. also one ref for each 'thing'
+ /* Mask this mark is for [mark->lock, group->mark_mutex] */
+ __u32 mask;
+ /* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt; /* active things looking at this mark */
- struct fsnotify_group *group; /* group this mark is for */
- struct list_head g_list; /* list of marks by group->i_fsnotify_marks
- * Also reused for queueing mark into
- * destroy_list when it's waiting for
- * the end of SRCU period before it can
- * be freed */
- spinlock_t lock; /* protect group and inode */
- struct hlist_node obj_list; /* list of marks for inode / vfsmount */
- struct list_head free_list; /* tmp list used when freeing this mark */
- union {
+ atomic_t refcnt;
+ /* Group this mark is for. Set on mark creation, stable until last ref
+ * is dropped */
+ struct fsnotify_group *group;
+ /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ * mark into destroy_list when it's waiting for the end of SRCU period
+ * before it can be freed. [group->mark_mutex] */
+ struct list_head g_list;
+ /* Protects inode / mnt pointers, flags, masks */
+ spinlock_t lock;
+ /* List of marks for inode / vfsmount [obj_lock] */
+ struct hlist_node obj_list;
+ union { /* Object pointer [mark->lock, group->mark_mutex] */
struct inode *inode; /* inode this mark is associated with */
struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
- __u32 ignored_mask; /* events types to ignore */
+ /* Events types to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignored_mask;
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
- unsigned int flags; /* vfsmount or inode mark? */
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
+ unsigned int flags; /* flags [mark->lock] */
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
@@ -345,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
-extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
- struct fsnotify_group *group);
+/* detach mark from inode / mount list, group list, drop inode reference */
+extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
+/* free mark */
+extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct list_head *list);
+extern void fsnotify_unmount_inodes(struct super_block *sb);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
return 0;
}
-static inline void fsnotify_unmount_inodes(struct list_head *list)
+static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
#endif /* CONFIG_FSNOTIFY */
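A hedged sketch of the two-step teardown that replaces fsnotify_destroy_mark_locked(): detach the mark under the group's mark_mutex, then free it outside the lock, mirroring what fsnotify_destroy_mark() itself does.

#include <linux/fsnotify_backend.h>
#include <linux/mutex.h>

static void my_destroy_mark(struct fsnotify_mark *mark,
                            struct fsnotify_group *group)
{
        mutex_lock(&group->mark_mutex);
        fsnotify_detach_mark(mark);
        mutex_unlock(&group->mark_mutex);

        /* Safe to free only after the mark is off all lists */
        fsnotify_free_mark(mark);
}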
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb139..7ff168d06 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@ struct gen_pool {
genpool_algo_t algo; /* allocation function */
void *data;
+
+ const char *name;
};
/*
@@ -118,8 +120,8 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
- int min_alloc_order, int nid);
-extern struct gen_pool *gen_pool_get(struct device *dev);
+ int min_alloc_order, int nid, const char *name);
+extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0f4..2adbfa6d0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -13,6 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
#ifdef CONFIG_BLOCK
@@ -124,7 +125,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
- atomic_t ref;
+ struct percpu_ref ref;
struct rcu_head rcu_head;
};
@@ -611,7 +612,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
sector_t len, int flags,
struct partition_meta_info
*info);
-extern void __delete_partition(struct hd_struct *);
+extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
@@ -640,27 +641,39 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline void hd_ref_init(struct hd_struct *part)
+static inline int hd_ref_init(struct hd_struct *part)
{
- atomic_set(&part->ref, 1);
- smp_mb();
+ if (percpu_ref_init(&part->ref, __delete_partition, 0,
+ GFP_KERNEL))
+ return -ENOMEM;
+ return 0;
}
static inline void hd_struct_get(struct hd_struct *part)
{
- atomic_inc(&part->ref);
- smp_mb__after_atomic();
+ percpu_ref_get(&part->ref);
}
static inline int hd_struct_try_get(struct hd_struct *part)
{
- return atomic_inc_not_zero(&part->ref);
+ return percpu_ref_tryget_live(&part->ref);
}
static inline void hd_struct_put(struct hd_struct *part)
{
- if (atomic_dec_and_test(&part->ref))
- __delete_partition(part);
+ percpu_ref_put(&part->ref);
+}
+
+static inline void hd_struct_kill(struct hd_struct *part)
+{
+ percpu_ref_kill(&part->ref);
+}
+
+static inline void hd_free_part(struct hd_struct *part)
+{
+ free_part_stats(part);
+ free_part_info(part);
+ percpu_ref_exit(&part->ref);
}
/*
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d332d6165..6007bb265 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -64,7 +64,10 @@ struct vm_area_struct;
* but it is definitely preferable to use the flag rather than opencode endless
* loop around allocator.
*
- * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed. The OOM killer is not called with the current
+ * implementation.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
@@ -302,22 +305,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order)
+/*
+ * Allocate pages, preferring the node given as nid. The node must be valid and
+ * online. For a more general interface, see alloc_pages_node().
+ */
+static inline struct page *
+__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
- /* Unknown node is current node */
- if (nid < 0)
- nid = numa_node_id();
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_WARN_ON(!node_online(nid));
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
-static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+/*
+ * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
+ * prefer the current CPU's closest node. Otherwise node must be valid and
+ * online.
+ */
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
- VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
- return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+ return __alloc_pages_node(nid, gfp_mask, order);
}
#ifdef CONFIG_NUMA
@@ -356,7 +368,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
-/* This is different from alloc_pages_exact_node !!! */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
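An illustrative pair of calls showing the new split: alloc_pages_node() now accepts NUMA_NO_NODE and falls back to the nearest memory node, while __alloc_pages_node() is the stricter fast path for callers that already hold a valid, online node id. The wrapper below is invented.

#include <linux/gfp.h>
#include <linux/numa.h>

static struct page *grab_page_for(int nid)
{
        if (nid == NUMA_NO_NODE)
                return alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);

        /* Caller guarantees nid is valid and online on this path */
        return __alloc_pages_node(nid, GFP_KERNEL, 0);
}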
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255ae..14cac67c2 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@ enum gpiod_flags {
int gpiod_count(struct device *dev, const char *con_id);
/* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
@@ -146,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
return 0;
}
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
- const char *con_id,
- enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
- enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
- unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -206,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
@@ -214,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
}
static inline
struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -223,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
#endif /* CONFIG_GPIOLIB */
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...) \
- __gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...) \
- __gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...) \
- __devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) \
- __devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
- __devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
- __devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
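Hedged sketch of a consumer after the rename: with the vararg transition macros removed (see the hunk above), the flags argument is mandatory everywhere. example_probe and the "reset" con_id are invented for illustration.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* flags can no longer be omitted; even GPIOD_ASIS must be spelled out */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (reset)
		gpiod_set_value_cansleep(reset, 1);
	return 0;
}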
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd4d..1aed31c5f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
struct device;
@@ -64,6 +65,17 @@ struct seq_file;
* registers.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ * between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ * for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ * provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programming interface.
@@ -126,6 +138,7 @@ struct gpio_chip {
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
int irq_parent;
+ struct lock_class_key *lock_key;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
+ }) \
+)
+#else
+#define gpiochip_irqchip_add(...) \
+ _gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
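A hypothetical driver call site, assuming gc and ic are already set up; it shows why the macro indirection exists: each call site gets its own static lockdep class when CONFIG_LOCKDEP is enabled.

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int example_add_irqchip(struct gpio_chip *gc, struct irq_chip *ic)
{
	/*
	 * Under lockdep this expands to _gpiochip_irqchip_add(gc, ic, 0,
	 * handle_simple_irq, IRQ_TYPE_NONE, &_key) with a per-call-site
	 * static key; otherwise NULL is passed as the key.
	 */
	return gpiochip_irqchip_add(gc, ic, 0, handle_simple_irq,
				    IRQ_TYPE_NONE);
}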
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e2706140e..c0d712d22 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
}
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#endif /* __LINUX_GPIO_MACHINE_H */
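Sketch of how board code might pair the existing gpiod_add_lookup_table() with the new removal helper; the chip label, offset and con_id are invented.

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_gpios = {
	.dev_id = "example-device",
	.table = {
		GPIO_LOOKUP("gpiochip0", 12, "reset", GPIO_ACTIVE_LOW),
		{ },
	},
};

static void example_board_setup(void)
{
	gpiod_add_lookup_table(&example_gpios);
}

static void example_board_teardown(void)
{
	/* new in this patch: a lookup table can now be unregistered again */
	gpiod_remove_lookup_table(&example_gpios);
}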
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f10b20f05..ecb080d6f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+ unsigned long pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
-extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
else
return 0;
}
-static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next)
-{
- if (!vma->anon_vma || vma->vm_ops)
- return;
- __vma_adjust_trans_huge(vma, start, end, adjust_next);
-}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
return ACCESS_ONCE(huge_zero_page) == page;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return is_huge_zero_page(pmd_page(pmd));
+}
+
+struct page *get_huge_zero_page(void);
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
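Illustrative only: one way a caller could use the new is_huge_zero_pmd() helper; example_pmd_needs_copy is hypothetical.

#include <linux/huge_mm.h>

static bool example_pmd_needs_copy(pmd_t pmd)
{
	/* the shared huge zero page is never copied or split per process */
	if (is_huge_zero_pmd(pmd))
		return false;
	return pmd_trans_huge(pmd);
}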
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d891f9494..5e35379f5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,9 @@ struct resv_map {
struct kref refs;
spinlock_t lock;
struct list_head regions;
+ long adds_in_progress;
+ struct list_head region_cache;
+ long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
-void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+ long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
+void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+extern struct mutex *hugetlb_fault_mutex_table;
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct address_space *mapping,
+ pgoff_t idx, unsigned long address);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
#endif
};
+struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ pgoff_t idx);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
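A hedged sketch of how the newly exported fault-mutex table is presumably meant to be used by the hugetlb fault and fallocate paths; example_serialize_fault is a hypothetical wrapper.

#include <linux/hugetlb.h>
#include <linux/mutex.h>

static void example_serialize_fault(struct hstate *h, struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    struct address_space *mapping,
				    pgoff_t idx, unsigned long address)
{
	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... page-cache insertion / reservation work for (mapping, idx) ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}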
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 30d3a1f79..54733d5b5 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ resource_size_t min, resource_size_t max,
+ resource_size_t size, resource_size_t align,
+ bool fb_overlap_ok);
+
/**
* VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
*
@@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
-extern struct resource hyperv_mmio;
-
/*
* Negotiated version with the Host.
*/
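Sketch of a caller of the new allocator that replaces direct use of the removed hyperv_mmio resource; the size and alignment values are arbitrary.

#include <linux/hyperv.h>

static int example_claim_mmio(struct hv_device *hdev, struct resource **res)
{
	/* 1 MiB window, 1 MiB aligned, anywhere the host allows */
	return vmbus_allocate_mmio(res, hdev, 0, (resource_size_t)-1,
				   0x100000, 0x100000, false);
}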
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e83a738a3..768063baa 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
+extern s32
+i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
#endif /* I2C */
/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
/*flags for the client struct: */
-#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
-#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
/* must call put_device() when done with returned i2c_adapter device */
extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
{
return NULL;
}
+
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
#endif /* _LINUX_I2C_H */
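Hypothetical consumer of the new emulated block-read helper; example_read_regs is not part of the patch.

#include <linux/i2c.h>

static int example_read_regs(struct i2c_client *client, u8 start,
			     u8 *buf, u8 len)
{
	s32 ret;

	/* falls back to byte/word reads on adapters without I2C block support */
	ret = i2c_smbus_read_i2c_block_data_or_emulated(client, start, len, buf);
	return ret < 0 ? ret : 0;
}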
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897dc..cfa906f28 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad488d..908429216 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
+extern int sysctl_igmp_llm_reports;
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2c476acb8..3c17cd7fd 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
+ * @wai_addr: The address of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
* @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
*/
struct st_sensor_settings {
u8 wai;
+ u8 wai_addr;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
int num_ch;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 26fb8f634..fad58671c 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
/**
* iio_channel_cb_get_channels() - get access to the underlying channels.
- * @cb_buff: The callback buffer from whom we want the channel
+ * @cb_buffer: The callback buffer from which we want the channel
* information.
*
* This function allows one to obtain information about the channels.
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 8a1d18640..9cd8f7472 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@ struct iio_chan_spec;
* struct iio_dev_attr - iio specific device attribute
* @dev_attr: underlying device attribute
* @address: associated register address
- * @l: list head for maintaining list of dynamically created attrs.
+ * @l: list head for maintaining list of dynamically created attrs
+ * @c: specification for the underlying channel
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index fa76c79a5..1c9e028e0 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@ struct iio_subirq {
bool enabled;
};
+struct iio_dev;
+struct iio_trigger;
+
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @owner: used to monitor usage count of the trigger.
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index c378ebec6..f72f70d5a 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@ struct iio_dev;
struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
- irqreturn_t (*pollfunc_bh)(int irq, void *p),
- irqreturn_t (*pollfunc_th)(int irq, void *p),
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index bb9b075f0..0b61b546c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -39,6 +39,14 @@ extern struct fs_struct init_fs;
#define INIT_CPUSET_SEQ(tsk)
#endif
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -53,6 +61,7 @@ extern struct fs_struct init_fs;
.cputime_atomic = INIT_CPUTIME_ATOMIC, \
.running = 0, \
}, \
+ INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
INIT_GROUP_RWSEM(sig) \
@@ -155,8 +164,6 @@ extern struct task_group root_task_group;
# define INIT_VTIME(tsk)
#endif
-#define INIT_TASK_COMM "swapper"
-
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
@@ -185,6 +192,78 @@ extern struct task_group root_task_group;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
+#ifdef CONFIG_SCHED_BFS
+#define INIT_TASK_COMM "BFS"
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .prio = NORMAL_PRIO, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = NORMAL_PRIO, \
+ .deadline = 0, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .time_slice = HZ, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_TASK_RCU_TASKS(tsk) \
+ INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
+ INIT_PREV_CPUTIME(tsk) \
+ INIT_VTIME(tsk) \
+ INIT_NUMA_BALANCING(tsk) \
+ INIT_KASAN(tsk) \
+}
+#else /* CONFIG_SCHED_BFS */
+#define INIT_TASK_COMM "swapper"
#define INIT_TASK(tsk) \
{ \
.state = 0, \
@@ -254,11 +333,12 @@ extern struct task_group root_task_group;
INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
+ INIT_PREV_CPUTIME(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
}
-
+#endif /* CONFIG_SCHED_BFS */
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea6c..c91e13761 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
#ifndef _TOUCHSCREEN_H
#define _TOUCHSCREEN_H
-#include <linux/input.h>
+struct input_dev;
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev,
- bool multitouch)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
#endif
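A sketch of a driver fragment after the OF-only parser is replaced by the generic property-based one; example_setup_ts is invented.

#include <linux/input.h>
#include <linux/input/touchscreen.h>

static void example_setup_ts(struct input_dev *input)
{
	/* no CONFIG_OF guard needed; values come from unified device properties */
	touchscreen_parse_properties(input, true);
}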
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24..6240063bd 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain **domains; /* ptr to domains */
+ struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c27dde721..e399029b6 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/page.h>
/*
diff --git a/include/linux/io.h b/include/linux/io.h
index fb5a99800..de64c1e53 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
+struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);
+void *devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void devm_memunmap(struct device *dev, void *addr);
+
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+ /*
+ * Fail attempts to call devm_memremap_pages() without
+ * ZONE_DEVICE support enabled, this requires callers to fall
+ * back to plain devm_memremap() based on config
+ */
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+enum {
+ /* See memremap() kernel-doc for usage description... */
+ MEMREMAP_WB = 1 << 0,
+ MEMREMAP_WT = 1 << 1,
+};
+
+void *memremap(resource_size_t offset, size_t size, unsigned long flags);
+void memunmap(void *addr);
+
#endif /* _LINUX_IO_H */
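Minimal sketch of the new memremap()/memunmap() pair declared above; the physical range is assumed to come from a firmware table and is not real.

#include <linux/io.h>

static void *example_map_table(resource_size_t phys, size_t size)
{
	/* request an ordinary cacheable (write-back) mapping; NULL on failure */
	return memremap(phys, size, MEMREMAP_WB);
}

static void example_unmap_table(void *virt)
{
	memunmap(virt);
}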
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index beb9ce1c2..ce2fc3c74 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -52,6 +52,8 @@ enum {
*/
static inline int task_nice_ioprio(struct task_struct *task)
{
+ if (iso_task(task))
+ return 0;
return (task_nice(task) + 20) / 5;
}
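Worked restatement of the mapping above (not kernel code); is_iso stands in for the BFS iso_task() test.

#include <linux/types.h>

static int example_nice_to_ioprio(int nice, bool is_iso)
{
	if (is_iso)			/* SCHED_ISO tasks always map to the top level */
		return 0;
	return (nice + 20) / 5;		/* nice -20 -> 0, 0 -> 4, 19 -> 7 */
}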
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3920a19d8..92f7177db 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
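Sketch of the renamed, refcounted cache API; example_iova_init/exit are hypothetical wrappers around a driver's setup and teardown.

#include <linux/iova.h>

static int example_iova_init(void)
{
	int ret = iova_cache_get();	/* take a reference on the shared kmem cache */

	if (ret)
		return ret;
	/* ... init_iova_domain() / alloc_iova() users go here ... */
	return 0;
}

static void example_iova_exit(void)
{
	iova_cache_put();		/* drop the reference */
}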
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 0b1e569f5..f8cea1448 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
implement it. */
void (*set_need_watch)(void *send_info, bool enable);
+ /*
+ * Called when flushing all pending messages.
+ */
+ void (*flush_messages)(void *send_info);
+
/* Called when the interface should go into "run to
completion" mode. If this call sets the value to true, the
interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
upper layer until the start_processing() function in the handlers
is called, and the lower layer must get the interface from that
call. */
-int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60a..f1f32af6d 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
+ __s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
+ __s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
- __u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
+#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 51744bcf7..11bf09288 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,8 +110,8 @@ enum {
/*
* Return value for chip->irq_set_affinity()
*
- * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
* struct irq_common_data - per irq data shared by all irqchips
* @state_use_accessors: status information for irq chip functions.
* Use accessor functions to deal with it
+ * @node: node index useful for balancing
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @affinity: IRQ affinity on SMP
+ * @msi_desc: MSI descriptor
*/
struct irq_common_data {
unsigned int state_use_accessors;
+#ifdef CONFIG_NUMA
+ unsigned int node;
+#endif
+ void *handler_data;
+ struct msi_desc *msi_desc;
+ cpumask_var_t affinity;
};
/**
@@ -139,38 +149,26 @@ struct irq_common_data {
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
- * @node: node index useful for balancing
* @common: point to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
* @parent_data: pointer to parent struct irq_data to support hierarchy
* irq_domain
- * @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
- * @msi_desc: MSI descriptor
- * @affinity: IRQ affinity on SMP
- *
- * The fields here need to overlay the ones in irq_desc until we
- * cleaned up the direct references and switched everything over to
- * irq_data.
*/
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
- unsigned int node;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_data *parent_data;
#endif
- void *handler_data;
void *chip_data;
- struct msi_desc *msi_desc;
- cpumask_var_t affinity;
};
/*
@@ -190,6 +188,7 @@ struct irq_data {
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
+ * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -204,6 +203,7 @@ enum {
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
+ IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
#define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
+static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
+}
/*
* Functions for chained handlers which can be enabled/disabled by the
@@ -324,8 +338,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
- * @irq_suspend: function called from core code on suspend once per chip
- * @irq_resume: function called from core code on resume once per chip
+ * @irq_suspend: function called from core code on suspend once per
+ * chip, when one or more interrupts are installed
+ * @irq_resume: function called from core code on resume once per chip,
+ * when one or more interrupts are installed
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_calc_mask: Optional function to set irq_data.mask for special cases
* @irq_print_chip: optional to print special chip info in show_interrupts
@@ -459,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
* Built-in IRQ handlers for various IRQ types,
* callable via desc->handle_irq()
*/
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(struct irq_desc *desc);
+extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_edge_irq(struct irq_desc *desc);
+extern void handle_edge_eoi_irq(struct irq_desc *desc);
+extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_percpu_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_irq(struct irq_desc *desc);
+extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -488,8 +504,7 @@ extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
/* Enable/disable irq debugging output: */
@@ -626,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->handler_data : NULL;
+ return d ? d->common->handler_data : NULL;
}
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
- return d->handler_data;
+ return d->common->handler_data;
}
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->msi_desc : NULL;
+ return d ? d->common->msi_desc : NULL;
}
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
- return d->msi_desc;
+ return d->common->msi_desc;
}
static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -651,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
-static inline int irq_data_get_node(struct irq_data *d)
+static inline int irq_common_data_get_node(struct irq_common_data *d)
{
+#ifdef CONFIG_NUMA
return d->node;
+#else
+ return 0;
+#endif
+}
+
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return irq_common_data_get_node(d->common);
}
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
- return d ? d->affinity : NULL;
+ return d ? d->common->affinity : NULL;
}
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- return d->affinity;
+ return d->common->affinity;
}
unsigned int arch_dynirq_lower_bound(unsigned int from);
@@ -762,6 +786,12 @@ struct irq_chip_type {
* @reg_base: Register base address (virtual)
* @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
* @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
+ * @suspend: Function called from core code on suspend once per
+ * chip; can be useful instead of irq_chip::suspend to
+ * handle chip details even when no interrupts are in use
+ * @resume: Function called from core code on resume once per chip;
+ * can be useful instead of irq_chip::resume to handle
+ * chip details even when no interrupts are in use
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -788,6 +818,8 @@ struct irq_chip_generic {
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
+ void (*suspend)(struct irq_chip_generic *gc);
+ void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
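Hedged sketch of an irq_chip callback written against the relocated fields; only the accessors are used, since handler_data, msi_desc and affinity now live in irq_common_data. example_set_affinity is hypothetical.

#include <linux/irq.h>

static int example_set_affinity(struct irq_data *d,
				const struct cpumask *mask, bool force)
{
	const struct cpumask *cur = irq_data_get_affinity_mask(d);
	void *priv = irq_data_get_irq_handler_data(d);
	int node = irq_data_get_node(d);

	/* ... program the hardware using priv/node, compare mask with cur ... */
	(void)cur; (void)priv; (void)node;
	return IRQ_SET_MASK_OK;
}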
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ffbc034c8..9eeeb9589 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -104,6 +104,8 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_ISACTIVER GICD_ISACTIVER
+#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -268,9 +270,12 @@
#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
+#define ICH_LR_HW (1UL << 61)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
@@ -288,6 +293,7 @@
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -360,6 +366,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/msi.h>
/*
* We need a value to serve as an irq-type for LPIs. Choose one that will
@@ -384,6 +391,12 @@ static inline void gic_write_eoir(u64 irq)
isb();
}
+static inline void gic_write_dir(u64 irq)
+{
+ asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
+ isb();
+}
+
struct irq_domain;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9de976b4f..b8901dfd9 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GIC_CPU_DEACTIVATE 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
+
+#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
+
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
-#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
@@ -95,11 +100,10 @@
struct device_node;
-void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_cpu_if_down(void);
+int gic_cpu_if_down(unsigned int gic_nr);
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad3734..4e6861605 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr) ((intr) % 32)
+#define GIC_INTR_OFS(intr) ({ \
+ unsigned bits = mips_cm_is64 ? 64 : 32; \
+ unsigned reg_idx = (intr) / bits; \
+ unsigned reg_width = bits / 8; \
+ \
+ reg_idx * reg_width; \
+})
+#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
/* Polarity : Reset Value is always 0 */
#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +106,8 @@
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
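A small worked example of the register/bit split computed by GIC_INTR_OFS()/GIC_INTR_BIT() above, written as plain arithmetic; example_intr_ofs and its cm_is64 flag are illustrative stand-ins for mips_cm_is64.

#include <linux/types.h>

static unsigned int example_intr_ofs(unsigned int intr, bool cm_is64)
{
	unsigned int bits = cm_is64 ? 64 : 32;

	/* e.g. intr 40: byte offset 4, bit 8 on CM; byte offset 0, bit 40 on CM3 */
	return (intr / bits) * (bits / 8);
}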
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fcea4e48e..a587a3336 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- return irq_to_desc(data->irq);
-#else
- return container_of(data, struct irq_desc, irq_data);
-#endif
+ return container_of(data->common, struct irq_desc, irq_common_data);
}
static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
- return desc->irq_data.handler_data;
+ return desc->irq_common_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
- return desc->irq_data.msi_desc;
+ return desc->irq_common_data.msi_desc;
}
/*
* Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ * handle an interrupt.
*/
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
- desc->handle_irq(irq, desc);
+ desc->handle_irq(desc);
}
int generic_handle_irq(unsigned int irq);
@@ -166,33 +160,14 @@ static inline int handle_domain_irq(struct irq_domain *domain,
#endif
/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+static inline int irq_desc_has_action(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
-/* caller has locked the irq_desc and both params are valid */
-static inline void __irq_set_handler_locked(unsigned int irq,
- irq_flow_handler_t handler)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->handle_irq = handler;
-}
-
-/* caller has locked the irq_desc and both params are valid */
-static inline void
-__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handler, const char *name)
+static inline int irq_has_action(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- irq_desc_get_irq_data(desc)->chip = chip;
- desc->handle_irq = handler;
- desc->name = name;
+ return irq_desc_has_action(irq_to_desc(irq));
}
/**
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 744ac0ec9..f644fdb06 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -45,6 +45,20 @@ struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+};
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@ struct irq_data;
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@ struct irq_domain {
/* Optional data */
struct device_node *of_node;
+ enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
@@ -145,6 +161,11 @@ enum {
IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
};
+static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
+{
+ return d->of_node;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
irq_hw_number_t hwirq_max, int direct_max,
@@ -161,9 +182,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+ return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
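Hypothetical irq_domain_ops::match implementation showing the extra bus_token argument; example_domain_match is not from the patch.

#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	/* claim the node for wildcard lookups or an exact bus-token match */
	return irq_domain_get_of_node(d) == node &&
	       (bus_token == DOMAIN_BUS_ANY || bus_token == d->bus_token);
}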
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 62d543004..661bed0ed 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
struct irq_desc;
struct irq_data;
-typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
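Sketch of a flow handler written against the new single-argument prototype; example_demux_handler and its pr_debug message are invented.

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/printk.h>

static void example_demux_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);	/* irq no longer passed in */

	pr_debug("demultiplexing irq %u\n", irq);
	handle_simple_irq(desc);
}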
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d32615280..000000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs. Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks. By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
- unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
- __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...) /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
- return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
- free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle. Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process. To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time. When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s handle_t; /* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s journal_t; /* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK 1
-#define JFS_COMMIT_BLOCK 2
-#define JFS_SUPERBLOCK_V1 3
-#define JFS_SUPERBLOCK_V2 4
-#define JFS_REVOKE_BLOCK 5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
- __be32 h_magic;
- __be32 h_blocktype;
- __be32 h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
- __be32 t_blocknr; /* The on-disk block number */
- __be32 t_flags; /* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
- journal_header_t r_header;
- __be32 r_count; /* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
-#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
-
-
-/*
- * The journal superblock. All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
- journal_header_t s_header;
-
-/* 0x000C */
- /* Static information describing the journal */
- __be32 s_blocksize; /* journal device blocksize */
- __be32 s_maxlen; /* total blocks in journal file */
- __be32 s_first; /* first block of log information */
-
-/* 0x0018 */
- /* Dynamic information describing the current state of the log */
- __be32 s_sequence; /* first commit ID expected in log */
- __be32 s_start; /* blocknr of start of log */
-
-/* 0x0020 */
- /* Error value, as set by journal_abort(). */
- __be32 s_errno;
-
-/* 0x0024 */
- /* Remaining fields are only valid in a version-2 superblock */
- __be32 s_feature_compat; /* compatible feature set */
- __be32 s_feature_incompat; /* incompatible feature set */
- __be32 s_feature_ro_compat; /* readonly-compatible feature set */
-/* 0x0030 */
- __u8 s_uuid[16]; /* 128-bit uuid for journal */
-
-/* 0x0040 */
- __be32 s_nr_users; /* Nr of filesystems sharing log */
-
- __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
- __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
- __be32 s_max_trans_data; /* Limit of data blocks per trans. */
-
-/* 0x0050 */
- __u32 s_padding[44];
-
-/* 0x0100 */
- __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES 0
-#define JFS_KNOWN_ROCOMPAT_FEATURES 0
-#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
- BH_JBD /* Has an attached ext3 journal_head */
- = BH_PrivateStart,
- BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
- BH_Freed, /* Has been freed (truncated) */
- BH_Revoked, /* Has been revoked from the log */
- BH_RevokeValid, /* Revoked flag is valid */
- BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
- BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
- BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
- BH_JBDPrivateStart, /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert) BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...) J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...) \
- ({ \
- int val = (expr); \
- if (!val) { \
- printk(KERN_ERR \
- "EXT3-fs unexpected failure: %s;\n",# expr); \
- printk(KERN_ERR why "\n"); \
- } \
- val; \
- })
-#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
- /* Which compound transaction is this update a part of? */
- transaction_t *h_transaction;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
- int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
- int h_err;
-
- /* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_aborted: 1; /* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism. It
- * tracks a compound transaction through its various states:
- *
- * RUNNING: accepting new updates
- * LOCKED: Updates still running but we don't accept new ones
- * RUNDOWN: Updates are tidying up but have finished requesting
- * new buffers to modify (state not used for now)
- * FLUSH: All updates complete, but we are still writing to disk
- * COMMIT: All data on disk, writing commit record
- * FINISHED: We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- * j_list_lock
- * ->jbd_lock_bh_journal_head() (This is "innermost")
- *
- * j_state_lock
- * ->jbd_lock_bh_state()
- *
- * jbd_lock_bh_state()
- * ->j_list_lock
- *
- * j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
- * ->j_list_lock (journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
- /* Pointer to the journal for this transaction. [no locking] */
- journal_t *t_journal;
-
- /* Sequence number for this transaction [no locking] */
- tid_t t_tid;
-
- /*
- * Transaction's current state
- * [no locking - only kjournald alters this]
- * [j_list_lock] guards transition of a transaction into T_FINISHED
- * state and subsequent call of __journal_drop_transaction()
- * FIXME: needs barriers
- * KLUDGE: [use j_state_lock]
- */
- enum {
- T_RUNNING,
- T_LOCKED,
- T_FLUSH,
- T_COMMIT,
- T_COMMIT_RECORD,
- T_FINISHED
- } t_state;
-
- /*
- * Where in the log does this transaction's commit start? [no locking]
- */
- unsigned int t_log_start;
-
- /* Number of buffers on the t_buffers list [j_list_lock] */
- int t_nr_buffers;
-
- /*
- * Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
- */
- struct journal_head *t_reserved_list;
-
- /*
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
-
- /*
- * Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
- */
- struct journal_head *t_buffers;
-
- /*
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
-
- /*
- * Doubly-linked circular list of all forget buffers (superseded
- * buffers which we can un-checkpoint once this transaction commits)
- * [j_list_lock]
- */
- struct journal_head *t_forget;
-
- /*
- * Doubly-linked circular list of all buffers still to be flushed before
- * this transaction can be checkpointed. [j_list_lock]
- */
- struct journal_head *t_checkpoint_list;
-
- /*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
- * Doubly-linked circular list of temporary buffers currently undergoing
- * IO in the log [j_list_lock]
- */
- struct journal_head *t_iobuf_list;
-
- /*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
- */
- struct journal_head *t_shadow_list;
-
- /*
- * Doubly-linked circular list of control buffers being written to the
- * log. [j_list_lock]
- */
- struct journal_head *t_log_list;
-
- /*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
- * Number of outstanding updates running on this transaction
- * [t_handle_lock]
- */
- int t_updates;
-
- /*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
- */
- int t_outstanding_credits;
-
- /*
- * Forward and backward links for the circular list of all transactions
- * awaiting checkpoint. [j_list_lock]
- */
- transaction_t *t_cpnext, *t_cpprev;
-
- /*
- * When will the transaction expire (become due for commit), in jiffies?
- * [no locking]
- */
- unsigned long t_expires;
-
- /*
- * When this transaction started, in nanoseconds [no locking]
- */
- ktime_t t_start_time;
-
- /*
- * How many handles used this transaction? [t_handle_lock]
- */
- int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint: Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- * is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- * takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
- /* General journaling state flags [j_state_lock] */
- unsigned long j_flags;
-
- /*
- * Is there an outstanding uncleared error on the journal (from a prior
- * abort)? [j_state_lock]
- */
- int j_errno;
-
- /* The superblock buffer */
- struct buffer_head *j_sb_buffer;
- journal_superblock_t *j_superblock;
-
- /* Version of the superblock format */
- int j_format_version;
-
- /*
- * Protect the various scalars in the journal
- */
- spinlock_t j_state_lock;
-
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
- */
- int j_barrier_count;
-
- /*
- * Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_running_transaction;
-
- /*
- * the transaction we are pushing to disk
- * [j_state_lock] [caller holding open handle]
- */
- transaction_t *j_committing_transaction;
-
- /*
- * ... and a linked circular list of all transactions waiting for
- * checkpointing. [j_list_lock]
- */
- transaction_t *j_checkpoint_transactions;
-
- /*
- * Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
- */
- wait_queue_head_t j_wait_transaction_locked;
-
- /* Wait queue for waiting for checkpointing to complete */
- wait_queue_head_t j_wait_logspace;
-
- /* Wait queue for waiting for commit to complete */
- wait_queue_head_t j_wait_done_commit;
-
- /* Wait queue to trigger checkpointing */
- wait_queue_head_t j_wait_checkpoint;
-
- /* Wait queue to trigger commit */
- wait_queue_head_t j_wait_commit;
-
- /* Wait queue to wait for updates to complete */
- wait_queue_head_t j_wait_updates;
-
- /* Semaphore for locking against concurrent checkpoints */
- struct mutex j_checkpoint_mutex;
-
- /*
- * Journal head: identifies the first unused block in the journal.
- * [j_state_lock]
- */
- unsigned int j_head;
-
- /*
- * Journal tail: identifies the oldest still-used block in the journal.
- * [j_state_lock]
- */
- unsigned int j_tail;
-
- /*
- * Journal free: how many free blocks are there in the journal?
- * [j_state_lock]
- */
- unsigned int j_free;
-
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
- */
- unsigned int j_first;
- unsigned int j_last;
-
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
- */
- struct block_device *j_dev;
- int j_blocksize;
- unsigned int j_blk_offset;
-
- /*
- * Device which holds the client fs. For internal journal this will be
- * equal to j_dev.
- */
- struct block_device *j_fs_dev;
-
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
-
- /*
- * Protects the buffer lists and internal buffer state.
- */
- spinlock_t j_list_lock;
-
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
- struct inode *j_inode;
-
- /*
- * Sequence number of the oldest transaction in the log [j_state_lock]
- */
- tid_t j_tail_sequence;
-
- /*
- * Sequence number of the next transaction to grant [j_state_lock]
- */
- tid_t j_transaction_sequence;
-
- /*
- * Sequence number of the most recently committed transaction
- * [j_state_lock].
- */
- tid_t j_commit_sequence;
-
- /*
- * Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
- */
- tid_t j_commit_request;
-
- /*
- * Sequence number of the most recent transaction someone is waiting
- * for to commit.
- * [j_state_lock]
- */
- tid_t j_commit_waited;
-
- /*
- * Journal uuid: identifies the object (filesystem, LVM volume etc)
- * backed by this journal. This will eventually be replaced by an array
- * of uuids, allowing us to index multiple devices within a single
- * journal and to perform atomic updates across them.
- */
- __u8 j_uuid[16];
-
- /* Pointer to the current commit thread for this journal */
- struct task_struct *j_task;
-
- /*
- * Maximum number of metadata buffers to allow in a single compound
- * commit transaction
- */
- int j_max_transaction_buffers;
-
- /*
- * What is the maximum transaction lifetime before we begin a commit?
- */
- unsigned long j_commit_interval;
-
- /* The timer used to wakeup the commit thread: */
- struct timer_list j_commit_timer;
-
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
- */
- spinlock_t j_revoke_lock;
- struct jbd_revoke_table_s *j_revoke;
- struct jbd_revoke_table_s *j_revoke_table[2];
-
- /*
- * array of bhs for journal_commit_transaction
- */
- struct buffer_head **j_wbuf;
- int j_wbufsize;
-
- /*
- * this is the pid of the last person to run a synchronous operation
- * through the journal.
- */
- pid_t j_last_sync_writer;
-
- /*
- * the average amount of time in nanoseconds it takes to commit a
- * transaction to the disk. [j_state_lock]
- */
- u64 j_average_commit_time;
-
- /*
- * An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
- */
- void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
-#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
-#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
-#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
-#define JFS_BARRIER 0x020 /* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t *transaction,
- struct journal_head *jh_in,
- struct journal_head **jh_out,
- unsigned int blocknr);
-
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase. j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe. We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
- return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int journal_restart (handle_t *, int nblocks);
-extern int journal_extend (handle_t *, int nblocks);
-extern int journal_get_write_access(handle_t *, struct buffer_head *);
-extern int journal_get_create_access (handle_t *, struct buffer_head *);
-extern int journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int journal_dirty_data (handle_t *, struct buffer_head *);
-extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void journal_release_buffer (handle_t *, struct buffer_head *);
-extern int journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern void journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int journal_stop(handle_t *);
-extern int journal_flush (journal_t *);
-extern void journal_lock_updates (journal_t *);
-extern void journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
- struct block_device *fs_dev,
- int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int journal_update_format (journal_t *);
-extern int journal_check_used_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_check_available_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_set_features
- (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int journal_create (journal_t *);
-extern int journal_load (journal_t *journal);
-extern int journal_destroy (journal_t *);
-extern int journal_recover (journal_t *journal);
-extern int journal_wipe (journal_t *, int);
-extern int journal_skip_recovery (journal_t *);
-extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
- int);
-extern void journal_abort (journal_t *, int);
-extern int journal_errno (journal_t *);
-extern void journal_ack_err (journal_t *);
-extern int journal_clear_err (journal_t *);
-extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
- kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int journal_init_revoke(journal_t *, int);
-extern void journal_destroy_revoke_caches(void);
-extern int journal_init_revoke_caches(void);
-
-extern void journal_destroy_revoke(journal_t *);
-extern int journal_revoke (handle_t *,
- unsigned int, struct buffer_head *);
-extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *,
- transaction_t *, int);
-
-/* Recovery revoke support */
-extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void journal_clear_revoke(journal_t *);
-extern void journal_switch_revoke_table(journal_t *journal);
-extern void journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void __journal_drop_transaction(journal_t *, transaction_t *);
-extern int cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_aborted
- *
- * Simple test wrapper function to test the JFS_ABORT state flag. This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (e.g. ext3), and that we should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
- return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
- if (handle->h_aborted)
- return 1;
- return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
- handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__ */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
- int difference = (x - y);
- return (difference >= 0);
-}
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- if (journal->j_committing_transaction)
- nblocks += journal->j_committing_transaction->
- t_outstanding_credits;
- return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh) do {} while (0)
-#define print_buffer_fields(bh) do {} while (0)
-#define print_buffer_trace(bh) do {} while (0)
-#define BUFFER_TRACE(bh, info) do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
-#define JBUFFER_TRACE(jh, info) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_JBD_H */
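The tid_gt()/tid_geq() helpers deleted above compare transaction IDs through a signed difference so the result stays correct across a sequence-number wrap; jbd2 keeps the same helpers. A small standalone C sketch (not part of this patch) of why that works:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t tid_t;

    /* same idea as the removed tid_gt(): wraparound-safe "x is after y" */
    static int tid_gt(tid_t x, tid_t y)
    {
        int difference = (int)(x - y);
        return difference > 0;
    }

    int main(void)
    {
        /* 5 is "after" 0xfffffffb even though it is numerically smaller */
        printf("%d\n", tid_gt(5, 0xfffffffbU));   /* prints 1 */
        printf("%d\n", tid_gt(0xfffffffbU, 5));   /* prints 0 */
        return 0;
    }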
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index eb1cebed3..df07e7848 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/slab.h>
+#include <linux/bit_spinlock.h>
#include <crypto/hash.h>
#endif
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
BUFFER_FNS(Shadow, shadow)
BUFFER_FNS(Verified, verified)
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+ return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+ return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
#define J_ASSERT(assert) BUG_ON(!(assert))
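The helpers folded into jbd2.h above wrap bit spinlocks on the buffer_head state word. A hedged kernel-side sketch of the usual locking pattern (hypothetical caller, assumes <linux/jbd2.h>; not code from this patch):

    /* Hypothetical caller: serialize journal_head bookkeeping on BH_State. */
    static void example_update_jh(struct buffer_head *bh)
    {
        struct journal_head *jh;

        jbd_lock_bh_state(bh);          /* bit_spin_lock(BH_State, &bh->b_state) */
        jh = bh2jh(bh);                 /* bh->b_private */
        if (jh) {
            /* ... inspect or modify jh state under the lock ... */
        }
        jbd_unlock_bh_state(bh);
    }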
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc534323..000000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
- return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
- return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 535fd3bb1..9384572f6 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -164,7 +164,11 @@ static inline u64 get_jiffies_64(void)
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
+#ifdef CONFIG_SCHED_BFS
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
+#else
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
/*
* Change timeval to jiffies, trying to avoid the
@@ -351,7 +355,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
+static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
@@ -363,18 +367,11 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
}
extern unsigned long __usecs_to_jiffies(const unsigned int u);
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
- return u * (HZ / USEC_PER_SEC);
-}
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
@@ -405,7 +402,7 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
-static inline unsigned long usecs_to_jiffies(const unsigned int u)
+static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +413,25 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
}
}
-extern unsigned long timespec_to_jiffies(const struct timespec *value);
-extern void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value);
+extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
+extern void jiffies_to_timespec64(const unsigned long jiffies,
+ struct timespec64 *value);
+static inline unsigned long timespec_to_jiffies(const struct timespec *value)
+{
+ struct timespec64 ts = timespec_to_timespec64(*value);
+
+ return timespec64_to_jiffies(&ts);
+}
+
+static inline void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value)
+{
+ struct timespec64 ts;
+
+ jiffies_to_timespec64(jiffies, &ts);
+ *value = timespec64_to_timespec(ts);
+}
+
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
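The _usecs_to_jiffies() branch kept above rounds up, so a requested delay never expires early. A standalone C sketch of the same arithmetic (HZ == 250 is an assumption for illustration, not taken from any particular config):

    #include <stdio.h>

    #define HZ 250
    #define USEC_PER_SEC 1000000

    /* mirrors the kernel's round-up conversion: any remainder adds one tick */
    static unsigned long usecs_to_jiffies_sketch(unsigned int u)
    {
        return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
    }

    int main(void)
    {
        printf("%lu\n", usecs_to_jiffies_sketch(1));    /* 1: below one 4000us tick */
        printf("%lu\n", usecs_to_jiffies_sketch(4000)); /* 1: exactly one tick */
        printf("%lu\n", usecs_to_jiffies_sketch(4001)); /* 2: rounds up */
        return 0;
    }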
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473f2..f1094238a 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,50 @@
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED. I.e., DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
+ *
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
*
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'.
*
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).
* OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +62,10 @@
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
- *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- * if (static_key_true(&my_key)) {
- * }
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
*
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
*/
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +109,8 @@ struct static_key {
#ifndef __ASSEMBLY__
enum jump_label_type {
- JUMP_LABEL_DISABLE = 0,
- JUMP_LABEL_ENABLE,
+ JUMP_LABEL_NOP = 0,
+ JUMP_LABEL_JMP,
};
struct module;
@@ -101,33 +124,18 @@ static inline int static_key_count(struct static_key *key)
#ifdef HAVE_JUMP_LABEL
-#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
-#define JUMP_LABEL_TYPE_MASK 1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
- return (struct jump_entry *)((unsigned long)key->entries
- & ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
- if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
- JUMP_LABEL_TYPE_TRUE_BRANCH)
- return true;
- return false;
-}
+#define JUMP_TYPE_FALSE 0UL
+#define JUMP_TYPE_TRUE 1UL
+#define JUMP_TYPE_MASK 1UL
static __always_inline bool static_key_false(struct static_key *key)
{
- return arch_static_branch(key);
+ return arch_static_branch(key, false);
}
static __always_inline bool static_key_true(struct static_key *key)
{
- return !static_key_false(key);
+ return !arch_static_branch(key, true);
}
extern struct jump_entry __start___jump_table[];
@@ -145,12 +153,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+#define STATIC_KEY_INIT_TRUE \
{ .enabled = ATOMIC_INIT(1), \
- .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE \
{ .enabled = ATOMIC_INIT(0), \
- .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+ .entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
@@ -198,10 +206,8 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
- { .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
- { .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
#endif /* HAVE_JUMP_LABEL */
@@ -213,6 +219,157 @@ static inline bool static_key_enabled(struct static_key *key)
return static_key_count(key) > 0;
}
+static inline void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+ struct static_key key;
+};
+
+struct static_key_false {
+ struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name) \
+ struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name) \
+ struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch| likely (1) | unlikely (0)
+ * -----------+-----------------------+------------------
+ * | |
+ * true (1) | ... | ...
+ * | NOP | JMP L
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ * | |
+ * false (0) | ... | ...
+ * | JMP L | NOP
+ * | <br-stmts> | 1: ...
+ * | L: ... |
+ * | |
+ * | | L: <br-stmts>
+ * | | jmp 1b
+ * | |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ * enabled type branch instruction
+ * -----------------------------+-----------
+ * 0 0 0 | NOP
+ * 0 0 1 | JMP
+ * 0 1 0 | NOP
+ * 0 1 1 | JMP
+ *
+ * 1 0 0 | JMP
+ * 1 0 1 | NOP
+ * 1 1 0 | JMP
+ * 1 1 1 | NOP
+ *
+ * Which gives the following functions:
+ *
+ * dynamic: instruction = enabled ^ branch
+ * static: instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = !arch_static_branch(&(x)->key, true); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = !arch_static_branch_jump(&(x)->key, true); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#define static_branch_unlikely(x) \
+({ \
+ bool branch; \
+ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
+ branch = arch_static_branch_jump(&(x)->key, false); \
+ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+ branch = arch_static_branch(&(x)->key, false); \
+ else \
+ branch = ____wrong_branch_error(); \
+ branch; \
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+
#endif /* _LINUX_JUMP_LABEL_H */
#endif /* __ASSEMBLY__ */
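For the refcounted form noted above ("make more true" / "make more false"), a similar hedged sketch with an invented key name, again assuming kernel context:

    static DEFINE_STATIC_KEY_FALSE(example_trace_key);

    void example_probe_register(void)
    {
        static_branch_inc(&example_trace_key);  /* branch enabled while count != 0 */
    }

    void example_probe_unregister(void)
    {
        static_branch_dec(&example_trace_key);
    }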
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d777b..4b9f85c96 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,11 +10,19 @@ struct vm_struct;
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#include <asm/kasan.h>
+#include <asm/pgtable.h>
#include <linux/sched.h>
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 123be25ea..5d4e9c4b8 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
+size_t kernfs_path_len(struct kernfs_node *kn);
char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
+static inline size_t kernfs_path_len(struct kernfs_node *kn)
+{ return 0; }
+
static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen)
{ return NULL; }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e804306ef..d140b1e9f 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -16,7 +16,7 @@
#include <uapi/linux/kexec.h>
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -318,12 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-#else /* !CONFIG_KEXEC */
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
+int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
+int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+ unsigned long buf_len);
+int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
+ Elf_Shdr *sechdrs, unsigned int relsec);
+int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ unsigned int relsec);
+
+#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
-#endif /* CONFIG_KEXEC */
+#define kexec_in_progress false
+#endif /* CONFIG_KEXEC_CORE */
#endif /* !defined(__ASSEMBLY__) */
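The arch_kexec_* declarations added above rely on weak symbols: the core supplies a default that an architecture may override with a strong definition. A userspace sketch of that linkage pattern (names invented, assumes GCC or Clang):

    #include <stdio.h>

    /* weak default; a strong definition elsewhere would replace it at link time */
    __attribute__((weak)) int arch_hook(void)
    {
        return -1;      /* "not supported" */
    }

    int main(void)
    {
        printf("arch_hook() = %d\n", arch_hook());
        return 0;
    }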
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b723a..953f283f8 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
#endif
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0555cc66a..fcfd2bf14 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
UMH_DISABLED,
};
-extern void usermodehelper_init(void);
-
extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1ab54754a..8f6849084 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool within_kprobe_blacklist(unsigned long addr);
+
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d55206c..e691b6a23 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
const char namefmt[], ...);
#define kthread_create(threadfn, data, namefmt, arg...) \
- kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+ kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
})
void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8ef..e480f9fbd 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_REQ_SMI 26
+#define KVM_REQ_HV_CRASH 27
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -241,6 +242,7 @@ struct kvm_vcpu {
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
+ unsigned int halt_poll_ns;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
@@ -363,9 +365,6 @@ struct kvm {
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- u32 bsp_vcpu_id;
-#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
int last_boosted_vcpu;
@@ -424,8 +423,15 @@ struct kvm {
#define vcpu_unimpl(vcpu, fmt, ...) \
kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug(vcpu, fmt, ...) \
+ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
+ * the caller has read kvm->online_vcpus before (as is the case
+ * for kvm_for_each_vcpu, for example).
+ */
smp_rmb();
return kvm->vcpus[i];
}
@@ -436,6 +442,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu->vcpu_id == id)
+ return vcpu;
+ return NULL;
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -1055,22 +1072,9 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
-}
-
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
-}
-
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-
#else
-
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-
#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 75e3af01e..3f021dc5d 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -31,6 +31,9 @@ enum {
ND_CMD_ARS_STATUS_MAX = SZ_4K,
ND_MAX_MAPPINGS = 32,
+ /* region flag indicating to direct-map persistent memory by default */
+ ND_REGION_PAGEMAP = 0,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -91,6 +94,7 @@ struct nd_region_desc {
void *provider_data;
int num_lanes;
int numa_node;
+ unsigned long flags;
};
struct nvdimm_bus;
diff --git a/include/linux/list.h b/include/linux/list.h
index feb773c76..3e3e64a61 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -672,6 +672,11 @@ static inline void hlist_add_fake(struct hlist_node *n)
n->pprev = &n->next;
}
+static inline bool hlist_fake(struct hlist_node *h)
+{
+ return h->pprev == &h->next;
+}
+
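hlist_fake() above recognizes the self-referencing node set up by hlist_add_fake(): a node whose pprev points at its own next field looks hashed without being on any real list. A standalone sketch of that convention (types reduced to the minimum needed, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };

    static void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; }
    static bool hlist_fake(struct hlist_node *h) { return h->pprev == &h->next; }

    int main(void)
    {
        struct hlist_node n = { 0 };

        hlist_add_fake(&n);
        printf("%d\n", hlist_fake(&n)); /* prints 1 */
        return 0;
    }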
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0bc..fd4ca0b4f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
-#include <asm/cmpxchg.h>
struct llist_head {
struct llist_node *first;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9df..ffb9c9da4 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
} fam;
};
+struct lsm_ioctlop_audit {
+ struct path path;
+ u16 cmd;
+};
+
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_KMOD 8
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
+#define LSM_AUDIT_DATA_IOCTL_OP 11
union {
struct path path;
struct dentry *dentry;
@@ -68,6 +74,7 @@ struct common_audit_data {
} key_struct;
#endif
char *kmod_name;
+ struct lsm_ioctlop_audit *op;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9429f054c..ec3a6bab2 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1881,8 +1881,10 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
extern int __init security_module_enable(const char *module);
extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA_STACKED
-void __init yama_add_hooks(void);
+#ifdef CONFIG_SECURITY_YAMA
+extern void __init yama_add_hooks(void);
+#else
+static inline void __init yama_add_hooks(void) { }
#endif
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 68c424544..74deadb42 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/completion.h>
@@ -67,7 +67,8 @@ struct mbox_chan_ops {
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
* @of_xlate: Controller driver specific mapping of channel via DT
- * @poll: API private. Used to poll for TXDONE on all channels.
+ * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
+ * channels.
* @node: API private. To hook into list of controllers.
*/
struct mbox_controller {
@@ -81,7 +82,7 @@ struct mbox_controller {
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
- struct timer_list poll;
+ struct hrtimer poll_hrt;
struct list_head node;
};
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a16b1f9c1..0962b2ca6 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -6,6 +6,7 @@
#include <linux/mod_devicetable.h>
struct mei_cl_device;
+struct mei_device;
typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
u32 events, void *context);
@@ -17,6 +18,8 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
* Drivers for MEI devices will get an mei_cl_device pointer
* when being probed and shall use it for doing ME bus I/O.
*
+ * @bus_list: device on the bus list
+ * @bus: parent mei device
* @dev: linux driver model device pointer
* @me_cl: me client
* @cl: mei client
@@ -25,10 +28,16 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
* @event_cb: Drivers register this callback to get asynchronous ME
* event notifications (e.g. Rx buffer pending).
* @event_context: event callback run context
+ * @events_mask: Events bit mask requested by driver.
* @events: Events bitmask sent to the driver.
+ *
+ * @do_match: whether the device can be matched with a driver
+ * @is_added: device is already scanned
* @priv_data: client private data
*/
struct mei_cl_device {
+ struct list_head bus_list;
+ struct mei_device *bus;
struct device dev;
struct mei_me_client *me_cl;
@@ -38,8 +47,12 @@ struct mei_cl_device {
struct work_struct event_work;
mei_cl_event_cb_t event_cb;
void *event_context;
+ unsigned long events_mask;
unsigned long events;
+ unsigned int do_match:1;
+ unsigned int is_added:1;
+
void *priv_data;
};
@@ -65,10 +78,12 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
int mei_cl_register_event_cb(struct mei_cl_device *device,
+ unsigned long event_mask,
mei_cl_event_cb_t read_cb, void *context);
#define MEI_CL_EVENT_RX 0
#define MEI_CL_EVENT_TX 1
+#define MEI_CL_EVENT_NOTIF 2
void *mei_cl_get_drvdata(const struct mei_cl_device *device);
void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cc4b01972..c518eb589 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
+bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
@@ -323,7 +325,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
-int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73b02b0a8..3e3318ddf 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/page_counter.h>
+#include <linux/vmpressure.h>
+#include <linux/eventfd.h>
+#include <linux/mmzone.h>
+#include <linux/writeback.h>
struct mem_cgroup;
struct page;
@@ -67,12 +72,220 @@ enum mem_cgroup_events_index {
MEMCG_NR_EVENTS,
};
+/*
+ * The per-memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg events.
+ */
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH,
+ MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_TARGET_NUMAINFO,
+ MEM_CGROUP_NTARGETS,
+};
+
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
+struct cg_proto {
+ struct page_counter memory_allocated; /* Current allocated memory. */
+ struct percpu_counter sockets_allocated; /* Current number of sockets. */
+ int memory_pressure;
+ long sysctl_mem[3];
+ unsigned long flags;
+ /*
+ * The memcg field is used to find which memcg we belong to directly.
+ * Each memcg struct can hold more than one cg_proto, so container_of
+ * won't really cut it.
+ *
+ * The elegant solution would be having an inverse function to
+ * proto_cgroup in struct proto, but that means polluting the structure
+ * for everybody, instead of just for memcg users.
+ */
+ struct mem_cgroup *memcg;
+};
+
#ifdef CONFIG_MEMCG
+struct mem_cgroup_stat_cpu {
+ long count[MEM_CGROUP_STAT_NSTATS];
+ unsigned long events[MEMCG_NR_EVENTS];
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct mem_cgroup_reclaim_iter {
+ struct mem_cgroup *position;
+ /* scan generation, increased every round-trip */
+ unsigned int generation;
+};
+
+/*
+ * per-zone information in memory controller.
+ */
+struct mem_cgroup_per_zone {
+ struct lruvec lruvec;
+ unsigned long lru_size[NR_LRU_LISTS];
+
+ struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long usage_in_excess;/* Set to the value by which */
+ /* the soft limit is exceeded*/
+ bool on_tree;
+ struct mem_cgroup *memcg; /* Back pointer, we cannot */
+ /* use container_of */
+};
+
+struct mem_cgroup_per_node {
+ struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_threshold {
+ struct eventfd_ctx *eventfd;
+ unsigned long threshold;
+};
+
+/* For threshold */
+struct mem_cgroup_threshold_ary {
+ /* An array index points to threshold just below or equal to usage. */
+ int current_threshold;
+ /* Size of entries[] */
+ unsigned int size;
+ /* Array of thresholds */
+ struct mem_cgroup_threshold entries[0];
+};
+
+struct mem_cgroup_thresholds {
+ /* Primary thresholds array */
+ struct mem_cgroup_threshold_ary *primary;
+ /*
+ * Spare threshold array.
+ * This is needed to make mem_cgroup_unregister_event() "never fail".
+ * It must be able to store at least primary->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *spare;
+};
+
+/*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+ * statistics based on the statistics developed by Rik Van Riel for clock-pro,
+ * to help the administrator determine what knobs to tune.
+ */
+struct mem_cgroup {
+ struct cgroup_subsys_state css;
+
+ /* Accounted resources */
+ struct page_counter memory;
+ struct page_counter memsw;
+ struct page_counter kmem;
+
+ /* Normal memory consumption range */
+ unsigned long low;
+ unsigned long high;
+
+ unsigned long soft_limit;
+
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
+ /* css_online() has been completed */
+ int initialized;
+
+ /*
+ * Should the accounting and control be hierarchical, per subtree?
+ */
+ bool use_hierarchy;
+
+ /* protected by memcg_oom_lock */
+ bool oom_lock;
+ int under_oom;
+
+ int swappiness;
+ /* OOM-Killer disable */
+ int oom_kill_disable;
+
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /*
+ * Should we move charges of a task when a task is moved into this
+	 * mem_cgroup? And what type of charges should we move?
+ */
+ unsigned long move_charge_at_immigrate;
+ /*
+	 * Set > 0 if pages under this cgroup are moving to another cgroup.
+ */
+ atomic_t moving_account;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ struct task_struct *move_lock_task;
+ unsigned long move_lock_flags;
+ /*
+ * percpu counter.
+ */
+ struct mem_cgroup_stat_cpu __percpu *stat;
+
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+ struct cg_proto tcp_mem;
+#endif
+#if defined(CONFIG_MEMCG_KMEM)
+ /* Index in the kmem_cache->memcg_params.memcg_caches array */
+ int kmemcg_id;
+ bool kmem_acct_activated;
+ bool kmem_acct_active;
+#endif
+
+ int last_scanned_node;
+#if MAX_NUMNODES > 1
+ nodemask_t scan_nodes;
+ atomic_t numainfo_events;
+ atomic_t numainfo_updating;
+#endif
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+ struct wb_domain cgwb_domain;
+#endif
+
+	/* List of events which userspace wants to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+
+ struct mem_cgroup_per_node *nodeinfo[0];
+ /* WARNING: nodeinfo must be the last member here */
+};
extern struct cgroup_subsys_state *mem_cgroup_root_css;
-void mem_cgroup_events(struct mem_cgroup *memcg,
+/**
+ * mem_cgroup_events - count memory events against a cgroup
+ * @memcg: the memory cgroup
+ * @idx: the event index
+ * @nr: the number of events to account for
+ */
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
- unsigned int nr);
+ unsigned int nr)
+{
+ this_cpu_add(memcg->stat->events[idx], nr);
+}
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
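
A minimal usage sketch for the inline above; the wrapper is hypothetical and
assumes the caller already holds a valid memcg reference (for example under
rcu_read_lock()):

static inline void charge_one_fault(struct mem_cgroup *memcg)
{
	/* bump this cgroup's page-fault event counter by one */
	mem_cgroup_events(memcg, MEM_CGROUP_EVENTS_PGFAULT, 1);
}
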
@@ -90,15 +303,29 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
-bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
- struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
-extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct mem_cgroup, css) : NULL;
+}
-extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
+static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+ struct mem_cgroup *root)
+{
+ if (root == memcg)
+ return true;
+ if (!root->use_hierarchy)
+ return false;
+ return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
+}
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
@@ -114,24 +341,68 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
-extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+ino_t page_cgroup_ino(struct page *page);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
-void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline bool mem_cgroup_disabled(void)
+{
+ if (memory_cgrp_subsys.disabled)
+ return true;
+ return false;
+}
/*
* For memory reclaim.
*/
-int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
-bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
-extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
- struct task_struct *p);
+
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int nr_pages);
+
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return true;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ memcg = mz->memcg;
+
+ return !!(memcg->css.flags & CSS_ONLINE);
+}
+
+static inline
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ return mz->lru_size[lru];
+}
+
+static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+{
+ unsigned long inactive_ratio;
+ unsigned long inactive;
+ unsigned long active;
+ unsigned long gb;
+
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
+
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+ if (gb)
+ inactive_ratio = int_sqrt(10 * gb);
+ else
+ inactive_ratio = 1;
+
+ return inactive * inactive_ratio < active;
+}
+
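
A worked instance of the ratio computed above, assuming 4 KiB pages and
int_sqrt() truncating:

/*
 * With 4 GiB of anon pages on the LRU, gb = 4 and
 * inactive_ratio = int_sqrt(10 * 4) = 6, so the inactive list is only
 * reported as low while inactive * 6 < active.  Below 1 GiB the ratio
 * falls back to 1, i.e. a plain inactive < active comparison.
 */
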
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
static inline void mem_cgroup_oom_enable(void)
{
@@ -156,18 +427,26 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-static inline bool mem_cgroup_disabled(void)
-{
- if (memory_cgrp_subsys.disabled)
- return true;
- return false;
-}
-
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+/**
+ * mem_cgroup_update_page_stat - update page state statistics
+ * @memcg: memcg to account against
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ *
+ * See mem_cgroup_begin_page_stat() for locking requirements.
+ */
+static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ VM_BUG_ON(!rcu_read_lock_held());
+
+ if (memcg)
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
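
A sketch of the intended call pattern; the helper and its stat index parameter
are illustrative, and the begin/end pair is what satisfies the rcu_read_lock()
check in the inline above:

static void account_page_state(struct page *page,
			       enum mem_cgroup_stat_index idx, int val)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_begin_page_stat(page);
	mem_cgroup_update_page_stat(memcg, idx, val);
	mem_cgroup_end_page_stat(memcg);
}
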
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
@@ -184,13 +463,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
enum vm_event_item idx)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- __mem_cgroup_count_vm_event(mm, idx);
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ goto out;
+
+ switch (idx) {
+ case PGFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
+ break;
+ case PGMAJFAULT:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ break;
+ default:
+ BUG();
+ }
+out:
+ rcu_read_unlock();
}
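
Callers are unchanged by the un-inlining; a typical fault-path use looks like
this (hypothetical wrapper, shown only to illustrate the calling convention):

static void note_major_fault(struct mm_struct *mm)
{
	/* charge a major fault to mm's owning memcg */
	mem_cgroup_count_vm_event(mm, PGMAJFAULT);
}
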
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
@@ -199,8 +496,6 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
-#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr)
@@ -258,11 +553,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
return &zone->lruvec;
}
-static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
-{
- return NULL;
-}
-
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
@@ -275,12 +565,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
return true;
}
-static inline struct cgroup_subsys_state
- *mem_cgroup_css(struct mem_cgroup *memcg)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -392,8 +676,9 @@ enum {
struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
-void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
- unsigned long *pdirty, unsigned long *pwriteback);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+ unsigned long *pheadroom, unsigned long *pdirty,
+ unsigned long *pwriteback);
#else /* CONFIG_CGROUP_WRITEBACK */
@@ -403,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
}
static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
- unsigned long *pavail,
+ unsigned long *pfilepages,
+ unsigned long *pheadroom,
unsigned long *pdirty,
unsigned long *pwriteback)
{
@@ -428,8 +714,8 @@ static inline void sock_release_memcg(struct sock *sk)
extern struct static_key memcg_kmem_enabled_key;
extern int memcg_nr_cache_ids;
-extern void memcg_get_cache_ids(void);
-extern void memcg_put_cache_ids(void);
+void memcg_get_cache_ids(void);
+void memcg_put_cache_ids(void);
/*
* Helper macro to loop through all memcg-specific caches. Callers must still
@@ -444,7 +730,10 @@ static inline bool memcg_kmem_enabled(void)
return static_key_false(&memcg_kmem_enabled_key);
}
-bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return memcg->kmem_acct_active;
+}
/*
* In general, we'll do everything in our power to not incur in any overhead
@@ -463,7 +752,15 @@ void __memcg_kmem_commit_charge(struct page *page,
struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);
-int memcg_cache_id(struct mem_cgroup *memcg);
+/*
+ * Helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return memcg ? memcg->kmemcg_id : -1;
+}
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
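
A minimal sketch of how the id from memcg_cache_id() above is meant to be
consumed; the helper is hypothetical and only illustrates the -1 convention:

static inline bool memcg_wants_per_memcg_cache(struct mem_cgroup *memcg)
{
	/* -1 means not kmem-limited: allocations fall back to the root cache */
	return memcg_cache_id(memcg) >= 0;
}
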
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 6ffa0ac7f..8f60e899b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
-extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
+ bool for_device);
+extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283cc..8fcad63fa 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@ enum {
/* page 0 basic: slave addr 0x60 */
#define PM800_STATUS_1 (0x01)
-#define PM800_ONKEY_STS1 (1 << 0)
-#define PM800_EXTON_STS1 (1 << 1)
-#define PM800_CHG_STS1 (1 << 2)
-#define PM800_BAT_STS1 (1 << 3)
-#define PM800_VBUS_STS1 (1 << 4)
-#define PM800_LDO_PGOOD_STS1 (1 << 5)
-#define PM800_BUCK_PGOOD_STS1 (1 << 6)
+#define PM800_ONKEY_STS1 BIT(0)
+#define PM800_EXTON_STS1 BIT(1)
+#define PM800_CHG_STS1 BIT(2)
+#define PM800_BAT_STS1 BIT(3)
+#define PM800_VBUS_STS1 BIT(4)
+#define PM800_LDO_PGOOD_STS1 BIT(5)
+#define PM800_BUCK_PGOOD_STS1 BIT(6)
#define PM800_STATUS_2 (0x02)
-#define PM800_RTC_ALARM_STS2 (1 << 0)
+#define PM800_RTC_ALARM_STS2 BIT(0)
/* Wakeup Registers */
-#define PM800_WAKEUP1 (0x0D)
+#define PM800_WAKEUP1 (0x0D)
-#define PM800_WAKEUP2 (0x0E)
-#define PM800_WAKEUP2_INV_INT (1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR (1 << 1)
-#define PM800_WAKEUP2_INT_MASK (1 << 2)
+#define PM800_WAKEUP2 (0x0E)
+#define PM800_WAKEUP2_INV_INT BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR BIT(1)
+#define PM800_WAKEUP2_INT_MASK BIT(2)
-#define PM800_POWER_UP_LOG (0x10)
+#define PM800_POWER_UP_LOG (0x10)
/* Reference and low power registers */
#define PM800_LOW_POWER1 (0x20)
#define PM800_LOW_POWER2 (0x21)
-#define PM800_LOW_POWER_CONFIG3 (0x22)
-#define PM800_LOW_POWER_CONFIG4 (0x23)
+#define PM800_LOW_POWER_CONFIG3 (0x22)
+#define PM800_LOW_POWER_CONFIG4 (0x23)
/* GPIO register */
#define PM800_GPIO_0_1_CNTRL (0x30)
-#define PM800_GPIO0_VAL (1 << 0)
+#define PM800_GPIO0_VAL BIT(0)
#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO1_VAL (1 << 4)
+#define PM800_GPIO1_VAL BIT(4)
#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
#define PM800_GPIO_2_3_CNTRL (0x31)
-#define PM800_GPIO2_VAL (1 << 0)
+#define PM800_GPIO2_VAL BIT(0)
#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
-#define PM800_GPIO3_VAL (1 << 4)
+#define PM800_GPIO3_VAL BIT(4)
#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
#define PM800_GPIO3_MODE_MASK 0x1F
#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
-#define PM800_GPIO_4_CNTRL (0x32)
-#define PM800_GPIO4_VAL (1 << 0)
+#define PM800_GPIO_4_CNTRL (0x32)
+#define PM800_GPIO4_VAL BIT(0)
#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
#define PM800_HEADSET_CNTRL (0x38)
-#define PM800_HEADSET_DET_EN (1 << 7)
-#define PM800_HSDET_SLP (1 << 1)
+#define PM800_HEADSET_DET_EN BIT(7)
+#define PM800_HSDET_SLP BIT(1)
/* PWM register */
-#define PM800_PWM1 (0x40)
-#define PM800_PWM2 (0x41)
-#define PM800_PWM3 (0x42)
-#define PM800_PWM4 (0x43)
+#define PM800_PWM1 (0x40)
+#define PM800_PWM2 (0x41)
+#define PM800_PWM3 (0x42)
+#define PM800_PWM4 (0x43)
/* RTC Registers */
#define PM800_RTC_CONTROL (0xD0)
@@ -123,55 +123,55 @@ enum {
#define PM800_RTC_MISC4 (0xE4)
#define PM800_RTC_MISC5 (0xE7)
/* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN (1 << 0)
-#define PM800_ALARM_WAKEUP (1 << 4)
-#define PM800_ALARM (1 << 5)
-#define PM800_RTC1_USE_XO (1 << 7)
+#define PM800_ALARM1_EN BIT(0)
+#define PM800_ALARM_WAKEUP BIT(4)
+#define PM800_ALARM BIT(5)
+#define PM800_RTC1_USE_XO BIT(7)
/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
/* buck registers */
-#define PM800_SLEEP_BUCK1 (0x30)
+#define PM800_SLEEP_BUCK1 (0x30)
/* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1 (0x5A)
-#define PM800_BUCK1_SLP1_SHIFT 0
-#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1 (0x5A)
+#define PM800_BUCK1_SLP1_SHIFT 0
+#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
/* page 2 GPADC: slave addr 0x02 */
#define PM800_GPADC_MEAS_EN1 (0x01)
-#define PM800_MEAS_EN1_VBAT (1 << 2)
+#define PM800_MEAS_EN1_VBAT BIT(2)
#define PM800_GPADC_MEAS_EN2 (0x02)
-#define PM800_MEAS_EN2_RFTMP (1 << 0)
-#define PM800_MEAS_GP0_EN (1 << 2)
-#define PM800_MEAS_GP1_EN (1 << 3)
-#define PM800_MEAS_GP2_EN (1 << 4)
-#define PM800_MEAS_GP3_EN (1 << 5)
-#define PM800_MEAS_GP4_EN (1 << 6)
+#define PM800_MEAS_EN2_RFTMP BIT(0)
+#define PM800_MEAS_GP0_EN BIT(2)
+#define PM800_MEAS_GP1_EN BIT(3)
+#define PM800_MEAS_GP2_EN BIT(4)
+#define PM800_MEAS_GP3_EN BIT(5)
+#define PM800_MEAS_GP4_EN BIT(6)
#define PM800_GPADC_MISC_CONFIG1 (0x05)
#define PM800_GPADC_MISC_CONFIG2 (0x06)
-#define PM800_GPADC_MISC_GPFSM_EN (1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN BIT(0)
#define PM800_GPADC_SLOW_MODE(x) (x << 3)
-#define PM800_GPADC_MISC_CONFIG3 (0x09)
-#define PM800_GPADC_MISC_CONFIG4 (0x0A)
+#define PM800_GPADC_MISC_CONFIG3 (0x09)
+#define PM800_GPADC_MISC_CONFIG4 (0x0A)
-#define PM800_GPADC_PREBIAS1 (0x0F)
+#define PM800_GPADC_PREBIAS1 (0x0F)
#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
-#define PM800_GPADC_PREBIAS2 (0x10)
+#define PM800_GPADC_PREBIAS2 (0x10)
-#define PM800_GP_BIAS_ENA1 (0x14)
-#define PM800_GPADC_GP_BIAS_EN0 (1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1 (1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2 (1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3 (1 << 3)
+#define PM800_GP_BIAS_ENA1 (0x14)
+#define PM800_GPADC_GP_BIAS_EN0 BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1 BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2 BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3 BIT(3)
#define PM800_GP_BIAS_OUT1 (0x15)
-#define PM800_BIAS_OUT_GP0 (1 << 0)
-#define PM800_BIAS_OUT_GP1 (1 << 1)
-#define PM800_BIAS_OUT_GP2 (1 << 2)
-#define PM800_BIAS_OUT_GP3 (1 << 3)
+#define PM800_BIAS_OUT_GP0 BIT(0)
+#define PM800_BIAS_OUT_GP1 BIT(1)
+#define PM800_BIAS_OUT_GP2 BIT(2)
+#define PM800_BIAS_OUT_GP3 BIT(3)
#define PM800_GPADC0_LOW_TH 0x20
#define PM800_GPADC1_LOW_TH 0x21
@@ -222,37 +222,37 @@ enum {
#define PM805_INT_STATUS1 (0x03)
-#define PM805_INT1_HP1_SHRT (1 << 0)
-#define PM805_INT1_HP2_SHRT (1 << 1)
-#define PM805_INT1_MIC_CONFLICT (1 << 2)
-#define PM805_INT1_CLIP_FAULT (1 << 3)
-#define PM805_INT1_LDO_OFF (1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK (1 << 5)
+#define PM805_INT1_HP1_SHRT BIT(0)
+#define PM805_INT1_HP2_SHRT BIT(1)
+#define PM805_INT1_MIC_CONFLICT BIT(2)
+#define PM805_INT1_CLIP_FAULT BIT(3)
+#define PM805_INT1_LDO_OFF BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK BIT(5)
#define PM805_INT_STATUS2 (0x04)
-#define PM805_INT2_MIC_DET (1 << 0)
-#define PM805_INT2_SHRT_BTN_DET (1 << 1)
-#define PM805_INT2_VOLM_BTN_DET (1 << 2)
-#define PM805_INT2_VOLP_BTN_DET (1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT (1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT (1 << 5)
+#define PM805_INT2_MIC_DET BIT(0)
+#define PM805_INT2_SHRT_BTN_DET BIT(1)
+#define PM805_INT2_VOLM_BTN_DET BIT(2)
+#define PM805_INT2_VOLP_BTN_DET BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT BIT(5)
#define PM805_INT_MASK1 (0x05)
#define PM805_INT_MASK2 (0x06)
-#define PM805_SHRT_BTN_DET (1 << 1)
+#define PM805_SHRT_BTN_DET BIT(1)
/* number of status and int reg in a row */
#define PM805_INT_REG_NUM (2)
#define PM805_MIC_DET1 (0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET BIT(0)
#define PM805_MIC_DET2 (0x08)
-#define PM805_MIC_DET_STATUS1 (0x09)
+#define PM805_MIC_DET_STATUS1 (0x09)
-#define PM805_MIC_DET_STATUS3 (0x0A)
-#define PM805_AUTO_SEQ_STATUS1 (0x0B)
-#define PM805_AUTO_SEQ_STATUS2 (0x0C)
+#define PM805_MIC_DET_STATUS3 (0x0A)
+#define PM805_AUTO_SEQ_STATUS1 (0x0B)
+#define PM805_AUTO_SEQ_STATUS2 (0x0C)
#define PM805_ADC_SETTING1 (0x10)
#define PM805_ADC_SETTING2 (0x11)
@@ -261,7 +261,7 @@ enum {
#define PM805_ADC_GAIN2 (0x13)
#define PM805_DMIC_SETTING (0x15)
#define PM805_DWS_SETTING (0x16)
-#define PM805_MIC_CONFLICT_STS (0x17)
+#define PM805_MIC_CONFLICT_STS (0x17)
#define PM805_PDM_SETTING1 (0x20)
#define PM805_PDM_SETTING2 (0x21)
@@ -270,11 +270,11 @@ enum {
#define PM805_PDM_CONTROL2 (0x24)
#define PM805_PDM_CONTROL3 (0x25)
-#define PM805_HEADPHONE_SETTING (0x26)
-#define PM805_HEADPHONE_GAIN_A2A (0x27)
-#define PM805_HEADPHONE_SHORT_STATE (0x28)
-#define PM805_EARPHONE_SETTING (0x29)
-#define PM805_AUTO_SEQ_SETTING (0x2A)
+#define PM805_HEADPHONE_SETTING (0x26)
+#define PM805_HEADPHONE_GAIN_A2A (0x27)
+#define PM805_HEADPHONE_SHORT_STATE (0x28)
+#define PM805_EARPHONE_SETTING (0x29)
+#define PM805_AUTO_SEQ_SETTING (0x2A)
struct pm80x_rtc_pdata {
int vrtc;
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4f7..79e607e2f 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@ enum arizona_type {
WM5110 = 2,
WM8997 = 3,
WM8280 = 4,
+ WM8998 = 5,
+ WM1814 = 6,
};
#define ARIZONA_IRQ_GP1 0
@@ -165,6 +167,7 @@ static inline int wm5102_patch(struct arizona *arizona)
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4faad..1dc385850 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@ struct arizona_pdata {
 * useful for systems where an I2S bus with multiple data
* lines is mastered.
*/
- int max_channels_clocked[ARIZONA_MAX_AIF];
+ unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
/** GPIO5 is used for jack detection */
bool jd_gpio5;
@@ -125,22 +125,22 @@ struct arizona_pdata {
unsigned int hpdet_channel;
/** Extra debounce timeout used during initial mic detection (ms) */
- int micd_detect_debounce;
+ unsigned int micd_detect_debounce;
/** GPIO for mic detection polarity */
int micd_pol_gpio;
/** Mic detect ramp rate */
- int micd_bias_start_time;
+ unsigned int micd_bias_start_time;
/** Mic detect sample rate */
- int micd_rate;
+ unsigned int micd_rate;
/** Mic detect debounce level */
- int micd_dbtime;
+ unsigned int micd_dbtime;
/** Mic detect timeout (ms) */
- int micd_timeout;
+ unsigned int micd_timeout;
/** Force MICBIAS on for mic detect */
bool micd_force_micbias;
@@ -162,6 +162,8 @@ struct arizona_pdata {
/**
* Mode of input structures
* One of the ARIZONA_INMODE_xxx values
+ * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+ * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
*/
int inmode[ARIZONA_MAX_INPUT];
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36e6..fdd70b3c7 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
#define ARIZONA_PWM_DRIVE_3 0x32
#define ARIZONA_WAKE_CONTROL 0x40
#define ARIZONA_SEQUENCE_CONTROL 0x41
+#define ARIZONA_SPARE_TRIGGERS 0x42
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
@@ -139,6 +140,7 @@
#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
+#define ARIZONA_MIC_DETECT_4 0x2AB
#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
#define ARIZONA_ISOLATION_CONTROL 0x2CB
#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
@@ -225,14 +227,18 @@
#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
#define ARIZONA_DRE_ENABLE 0x440
+#define ARIZONA_DRE_CONTROL_1 0x441
#define ARIZONA_DRE_CONTROL_2 0x442
#define ARIZONA_DRE_CONTROL_3 0x443
+#define ARIZONA_EDRE_ENABLE 0x448
#define ARIZONA_DAC_AEC_CONTROL_1 0x450
+#define ARIZONA_DAC_AEC_CONTROL_2 0x451
#define ARIZONA_NOISE_GATE_CONTROL 0x458
#define ARIZONA_PDM_SPK1_CTRL_1 0x490
#define ARIZONA_PDM_SPK1_CTRL_2 0x491
#define ARIZONA_PDM_SPK2_CTRL_1 0x492
#define ARIZONA_PDM_SPK2_CTRL_2 0x493
+#define ARIZONA_HP_TEST_CTRL_13 0x49A
#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
@@ -310,6 +316,10 @@
#define ARIZONA_AIF3_TX_ENABLES 0x599
#define ARIZONA_AIF3_RX_ENABLES 0x59A
#define ARIZONA_AIF3_FORCE_WRITE 0x59B
+#define ARIZONA_SPD1_TX_CONTROL 0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
#define ARIZONA_SLIMBUS_RATES_1 0x5E5
#define ARIZONA_SLIMBUS_RATES_2 0x5E6
@@ -643,6 +653,10 @@
#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809
#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
@@ -868,6 +882,7 @@
#define ARIZONA_GPIO5_CTRL 0xC04
#define ARIZONA_IRQ_CTRL_1 0xC0F
#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
+#define ARIZONA_GP_SWITCH_1 0xC18
#define ARIZONA_MISC_PAD_CTRL_1 0xC20
#define ARIZONA_MISC_PAD_CTRL_2 0xC21
#define ARIZONA_MISC_PAD_CTRL_3 0xC22
@@ -1169,6 +1184,13 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
+#define ARIZONA_FRF_COEFF_1 0x1700
+#define ARIZONA_FRF_COEFF_2 0x1701
+#define ARIZONA_FRF_COEFF_3 0x1702
+#define ARIZONA_FRF_COEFF_4 0x1703
+#define ARIZONA_V2_DAC_COMP_1 0x1704
+#define ARIZONA_V2_DAC_COMP_2 0x1705
+
/*
* Field Definitions.
@@ -1431,6 +1453,42 @@
#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
/*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */
+#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */
+#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */
+#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */
+#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */
+#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */
+#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */
+#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */
+
+/*
* R97 (0x61) - Sample Rate Sequence Select 1
*/
#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */
#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
@@ -2413,6 +2474,16 @@
#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
/*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
+
+/*
* R707 (0x2C3) - Mic noise mix control 1
*/
#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
/*
* R785 (0x311) - ADC Digital Volume 1L
*/
+#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2560,6 +2637,12 @@
/*
* R789 (0x315) - ADC Digital Volume 1R
*/
+#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2604,6 +2687,12 @@
/*
* R793 (0x319) - ADC Digital Volume 2L
*/
+#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */
#define ARIZONA_IN_VU 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -3412,11 +3501,45 @@
#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
/*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
* R1090 (0x442) - DRE Control 2
*/
#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */
/*
* R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
+/*
+ * R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */
+
/*
* R1104 (0x450) - DAC AEC Control 1
*/
@@ -4308,6 +4474,86 @@
#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
/*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */
+
+/*
* R1507 (0x5E3) - SLIMbus Framer Ref Gear
*/
#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
/*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */
+
+/*
* R3104 (0xC20) - Misc Pad Ctrl 1
*/
#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
/*
* R3366 (0xD26) - Interrupt Raw Status 8
*/
+#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
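
The new SPD1 and EDRE macros follow the file's usual _MASK/_SHIFT/_WIDTH
convention; extracting a field from a raw register value is a plain
mask-and-shift (sketch, register read not shown):

static inline unsigned int arizona_spd1_rate(unsigned int spd1_tx_control)
{
	/* SPD1_RATE occupies bits [7:4] of SPD1 TX Control (0x5C2) */
	return (spd1_tx_control & ARIZONA_SPD1_RATE_MASK) >>
	       ARIZONA_SPD1_RATE_SHIFT;
}
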
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853fb..cc8ad1e1a 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
#define __LINUX_MFD_AXP20X_H
enum {
- AXP202_ID = 0,
+ AXP152_ID = 0,
+ AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP288_ID,
@@ -22,6 +23,24 @@ enum {
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
+#define AXP152_PWR_OP_MODE 0x01
+#define AXP152_LDO3456_DC1234_CTRL 0x12
+#define AXP152_ALDO_OP_MODE 0x13
+#define AXP152_LDO0_CTRL 0x15
+#define AXP152_DCDC2_V_OUT 0x23
+#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC1_V_OUT 0x26
+#define AXP152_DCDC3_V_OUT 0x27
+#define AXP152_ALDO12_V_OUT 0x28
+#define AXP152_DLDO1_V_OUT 0x29
+#define AXP152_DLDO2_V_OUT 0x2a
+#define AXP152_DCDC4_V_OUT 0x2b
+#define AXP152_V_OFF 0x31
+#define AXP152_OFF_CTRL 0x32
+#define AXP152_PEK_KEY 0x36
+#define AXP152_DCDC_FREQ 0x37
+#define AXP152_DCDC_MODE 0x80
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -69,6 +88,13 @@ enum {
#define AXP22X_CHRG_CTRL3 0x35
/* Interrupt */
+#define AXP152_IRQ1_EN 0x40
+#define AXP152_IRQ2_EN 0x41
+#define AXP152_IRQ3_EN 0x42
+#define AXP152_IRQ1_STATE 0x48
+#define AXP152_IRQ2_STATE 0x49
+#define AXP152_IRQ3_STATE 0x4a
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -127,6 +153,19 @@ enum {
#define AXP22X_PWREN_CTRL2 0x8d
/* GPIO */
+#define AXP152_GPIO0_CTRL 0x90
+#define AXP152_GPIO1_CTRL 0x91
+#define AXP152_GPIO2_CTRL 0x92
+#define AXP152_GPIO3_CTRL 0x93
+#define AXP152_LDOGPIO2_V_OUT 0x96
+#define AXP152_GPIO_INPUT 0x97
+#define AXP152_PWM0_FREQ_X 0x98
+#define AXP152_PWM0_FREQ_Y 0x99
+#define AXP152_PWM0_DUTY_CYCLE 0x9a
+#define AXP152_PWM1_FREQ_X 0x9b
+#define AXP152_PWM1_FREQ_Y 0x9c
+#define AXP152_PWM1_DUTY_CYCLE 0x9d
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -151,6 +190,12 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* OCV */
+#define AXP20X_RDC_H 0xba
+#define AXP20X_RDC_L 0xbb
+#define AXP20X_OCV(m) (0xc0 + (m))
+#define AXP20X_OCV_MAX 0xf
+
/* AXP22X specific registers */
#define AXP22X_BATLOW_THRES1 0xe6
@@ -218,6 +263,26 @@ enum {
/* IRQs */
enum {
+ AXP152_IRQ_LDO0IN_CONNECT = 1,
+ AXP152_IRQ_LDO0IN_REMOVAL,
+ AXP152_IRQ_ALDO0IN_CONNECT,
+ AXP152_IRQ_ALDO0IN_REMOVAL,
+ AXP152_IRQ_DCDC1_V_LOW,
+ AXP152_IRQ_DCDC2_V_LOW,
+ AXP152_IRQ_DCDC3_V_LOW,
+ AXP152_IRQ_DCDC4_V_LOW,
+ AXP152_IRQ_PEK_SHORT,
+ AXP152_IRQ_PEK_LONG,
+ AXP152_IRQ_TIMER,
+ AXP152_IRQ_PEK_RIS_EDGE,
+ AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_GPIO3_INPUT,
+ AXP152_IRQ_GPIO2_INPUT,
+ AXP152_IRQ_GPIO1_INPUT,
+ AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
AXP20X_IRQ_ACIN_REMOVAL,
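
The OCV macro addresses a block of sixteen calibration registers
(0xc0 .. 0xc0 + AXP20X_OCV_MAX); a driver would typically walk them through
regmap, roughly as below (struct axp20x_dev and its regmap member are assumed
from the existing driver and are not part of this header):

static int axp20x_read_ocv_table(struct axp20x_dev *axp20x, unsigned int *ocv)
{
	int i, ret;

	for (i = 0; i <= AXP20X_OCV_MAX; i++) {
		ret = regmap_read(axp20x->regmap, AXP20X_OCV(i), &ocv[i]);
		if (ret)
			return ret;
	}
	return 0;
}
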
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 000000000..376ba8436
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+ /* IRQ A */
+ DA9062_IRQ_ONKEY,
+ DA9062_IRQ_ALARM,
+ DA9062_IRQ_TICK,
+ DA9062_IRQ_WDG_WARN,
+ DA9062_IRQ_SEQ_RDY,
+ /* IRQ B*/
+ DA9062_IRQ_TEMP,
+ DA9062_IRQ_LDO_LIM,
+ DA9062_IRQ_DVC_RDY,
+ DA9062_IRQ_VDD_WARN,
+ /* IRQ C */
+ DA9062_IRQ_GPI0,
+ DA9062_IRQ_GPI1,
+ DA9062_IRQ_GPI2,
+ DA9062_IRQ_GPI3,
+ DA9062_IRQ_GPI4,
+
+ DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 000000000..97790d1b0
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for DA9062
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID 0x62
+#define DA9062_PMIC_VARIANT_MRC_AA 0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT 1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON 0x000
+#define DA9062AA_STATUS_A 0x001
+#define DA9062AA_STATUS_B 0x002
+#define DA9062AA_STATUS_D 0x004
+#define DA9062AA_FAULT_LOG 0x005
+#define DA9062AA_EVENT_A 0x006
+#define DA9062AA_EVENT_B 0x007
+#define DA9062AA_EVENT_C 0x008
+#define DA9062AA_IRQ_MASK_A 0x00A
+#define DA9062AA_IRQ_MASK_B 0x00B
+#define DA9062AA_IRQ_MASK_C 0x00C
+#define DA9062AA_CONTROL_A 0x00E
+#define DA9062AA_CONTROL_B 0x00F
+#define DA9062AA_CONTROL_C 0x010
+#define DA9062AA_CONTROL_D 0x011
+#define DA9062AA_CONTROL_E 0x012
+#define DA9062AA_CONTROL_F 0x013
+#define DA9062AA_PD_DIS 0x014
+#define DA9062AA_GPIO_0_1 0x015
+#define DA9062AA_GPIO_2_3 0x016
+#define DA9062AA_GPIO_4 0x017
+#define DA9062AA_GPIO_WKUP_MODE 0x01C
+#define DA9062AA_GPIO_MODE0_4 0x01D
+#define DA9062AA_GPIO_OUT0_2 0x01E
+#define DA9062AA_GPIO_OUT3_4 0x01F
+#define DA9062AA_BUCK2_CONT 0x020
+#define DA9062AA_BUCK1_CONT 0x021
+#define DA9062AA_BUCK4_CONT 0x022
+#define DA9062AA_BUCK3_CONT 0x024
+#define DA9062AA_LDO1_CONT 0x026
+#define DA9062AA_LDO2_CONT 0x027
+#define DA9062AA_LDO3_CONT 0x028
+#define DA9062AA_LDO4_CONT 0x029
+#define DA9062AA_DVC_1 0x032
+#define DA9062AA_COUNT_S 0x040
+#define DA9062AA_COUNT_MI 0x041
+#define DA9062AA_COUNT_H 0x042
+#define DA9062AA_COUNT_D 0x043
+#define DA9062AA_COUNT_MO 0x044
+#define DA9062AA_COUNT_Y 0x045
+#define DA9062AA_ALARM_S 0x046
+#define DA9062AA_ALARM_MI 0x047
+#define DA9062AA_ALARM_H 0x048
+#define DA9062AA_ALARM_D 0x049
+#define DA9062AA_ALARM_MO 0x04A
+#define DA9062AA_ALARM_Y 0x04B
+#define DA9062AA_SECOND_A 0x04C
+#define DA9062AA_SECOND_B 0x04D
+#define DA9062AA_SECOND_C 0x04E
+#define DA9062AA_SECOND_D 0x04F
+#define DA9062AA_SEQ 0x081
+#define DA9062AA_SEQ_TIMER 0x082
+#define DA9062AA_ID_2_1 0x083
+#define DA9062AA_ID_4_3 0x084
+#define DA9062AA_ID_12_11 0x088
+#define DA9062AA_ID_14_13 0x089
+#define DA9062AA_ID_16_15 0x08A
+#define DA9062AA_ID_22_21 0x08D
+#define DA9062AA_ID_24_23 0x08E
+#define DA9062AA_ID_26_25 0x08F
+#define DA9062AA_ID_28_27 0x090
+#define DA9062AA_ID_30_29 0x091
+#define DA9062AA_ID_32_31 0x092
+#define DA9062AA_SEQ_A 0x095
+#define DA9062AA_SEQ_B 0x096
+#define DA9062AA_WAIT 0x097
+#define DA9062AA_EN_32K 0x098
+#define DA9062AA_RESET 0x099
+#define DA9062AA_BUCK_ILIM_A 0x09A
+#define DA9062AA_BUCK_ILIM_B 0x09B
+#define DA9062AA_BUCK_ILIM_C 0x09C
+#define DA9062AA_BUCK2_CFG 0x09D
+#define DA9062AA_BUCK1_CFG 0x09E
+#define DA9062AA_BUCK4_CFG 0x09F
+#define DA9062AA_BUCK3_CFG 0x0A0
+#define DA9062AA_VBUCK2_A 0x0A3
+#define DA9062AA_VBUCK1_A 0x0A4
+#define DA9062AA_VBUCK4_A 0x0A5
+#define DA9062AA_VBUCK3_A 0x0A7
+#define DA9062AA_VLDO1_A 0x0A9
+#define DA9062AA_VLDO2_A 0x0AA
+#define DA9062AA_VLDO3_A 0x0AB
+#define DA9062AA_VLDO4_A 0x0AC
+#define DA9062AA_VBUCK2_B 0x0B4
+#define DA9062AA_VBUCK1_B 0x0B5
+#define DA9062AA_VBUCK4_B 0x0B6
+#define DA9062AA_VBUCK3_B 0x0B8
+#define DA9062AA_VLDO1_B 0x0BA
+#define DA9062AA_VLDO2_B 0x0BB
+#define DA9062AA_VLDO3_B 0x0BC
+#define DA9062AA_VLDO4_B 0x0BD
+#define DA9062AA_BBAT_CONT 0x0C5
+#define DA9062AA_INTERFACE 0x105
+#define DA9062AA_CONFIG_A 0x106
+#define DA9062AA_CONFIG_B 0x107
+#define DA9062AA_CONFIG_C 0x108
+#define DA9062AA_CONFIG_D 0x109
+#define DA9062AA_CONFIG_E 0x10A
+#define DA9062AA_CONFIG_G 0x10C
+#define DA9062AA_CONFIG_H 0x10D
+#define DA9062AA_CONFIG_I 0x10E
+#define DA9062AA_CONFIG_J 0x10F
+#define DA9062AA_CONFIG_K 0x110
+#define DA9062AA_CONFIG_M 0x112
+#define DA9062AA_TRIM_CLDR 0x120
+#define DA9062AA_GP_ID_0 0x121
+#define DA9062AA_GP_ID_1 0x122
+#define DA9062AA_GP_ID_2 0x123
+#define DA9062AA_GP_ID_3 0x124
+#define DA9062AA_GP_ID_4 0x125
+#define DA9062AA_GP_ID_5 0x126
+#define DA9062AA_GP_ID_6 0x127
+#define DA9062AA_GP_ID_7 0x128
+#define DA9062AA_GP_ID_8 0x129
+#define DA9062AA_GP_ID_9 0x12A
+#define DA9062AA_GP_ID_10 0x12B
+#define DA9062AA_GP_ID_11 0x12C
+#define DA9062AA_GP_ID_12 0x12D
+#define DA9062AA_GP_ID_13 0x12E
+#define DA9062AA_GP_ID_14 0x12F
+#define DA9062AA_GP_ID_15 0x130
+#define DA9062AA_GP_ID_16 0x131
+#define DA9062AA_GP_ID_17 0x132
+#define DA9062AA_GP_ID_18 0x133
+#define DA9062AA_GP_ID_19 0x134
+#define DA9062AA_DEVICE_ID 0x181
+#define DA9062AA_VARIANT_ID 0x182
+#define DA9062AA_CUSTOMER_ID 0x183
+#define DA9062AA_CONFIG_ID 0x184
+
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT 0
+#define DA9062AA_PAGE_MASK 0x3f
+#define DA9062AA_WRITE_MODE_SHIFT 6
+#define DA9062AA_WRITE_MODE_MASK BIT(6)
+#define DA9062AA_REVERT_SHIFT 7
+#define DA9062AA_REVERT_MASK BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT 0
+#define DA9062AA_NONKEY_MASK 0x01
+#define DA9062AA_DVC_BUSY_SHIFT 2
+#define DA9062AA_DVC_BUSY_MASK BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT 0
+#define DA9062AA_GPI0_MASK 0x01
+#define DA9062AA_GPI1_SHIFT 1
+#define DA9062AA_GPI1_MASK BIT(1)
+#define DA9062AA_GPI2_SHIFT 2
+#define DA9062AA_GPI2_MASK BIT(2)
+#define DA9062AA_GPI3_SHIFT 3
+#define DA9062AA_GPI3_MASK BIT(3)
+#define DA9062AA_GPI4_SHIFT 4
+#define DA9062AA_GPI4_MASK BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT 0
+#define DA9062AA_LDO1_ILIM_MASK 0x01
+#define DA9062AA_LDO2_ILIM_SHIFT 1
+#define DA9062AA_LDO2_ILIM_MASK BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT 2
+#define DA9062AA_LDO3_ILIM_MASK BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT 3
+#define DA9062AA_LDO4_ILIM_MASK BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT 0
+#define DA9062AA_TWD_ERROR_MASK 0x01
+#define DA9062AA_POR_SHIFT 1
+#define DA9062AA_POR_MASK BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT 2
+#define DA9062AA_VDD_FAULT_MASK BIT(2)
+#define DA9062AA_VDD_START_SHIFT 3
+#define DA9062AA_VDD_START_MASK BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT 4
+#define DA9062AA_TEMP_CRIT_MASK BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT 5
+#define DA9062AA_KEY_RESET_MASK BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT 6
+#define DA9062AA_NSHUTDOWN_MASK BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT 7
+#define DA9062AA_WAIT_SHUT_MASK BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT 0
+#define DA9062AA_E_NONKEY_MASK 0x01
+#define DA9062AA_E_ALARM_SHIFT 1
+#define DA9062AA_E_ALARM_MASK BIT(1)
+#define DA9062AA_E_TICK_SHIFT 2
+#define DA9062AA_E_TICK_MASK BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT 3
+#define DA9062AA_E_WDG_WARN_MASK BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT 4
+#define DA9062AA_E_SEQ_RDY_MASK BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT 5
+#define DA9062AA_EVENTS_B_MASK BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT 6
+#define DA9062AA_EVENTS_C_MASK BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT 1
+#define DA9062AA_E_TEMP_MASK BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT 3
+#define DA9062AA_E_LDO_LIM_MASK BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT 5
+#define DA9062AA_E_DVC_RDY_MASK BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT 7
+#define DA9062AA_E_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT 0
+#define DA9062AA_E_GPI0_MASK 0x01
+#define DA9062AA_E_GPI1_SHIFT 1
+#define DA9062AA_E_GPI1_MASK BIT(1)
+#define DA9062AA_E_GPI2_SHIFT 2
+#define DA9062AA_E_GPI2_MASK BIT(2)
+#define DA9062AA_E_GPI3_SHIFT 3
+#define DA9062AA_E_GPI3_MASK BIT(3)
+#define DA9062AA_E_GPI4_SHIFT 4
+#define DA9062AA_E_GPI4_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT 0
+#define DA9062AA_M_NONKEY_MASK 0x01
+#define DA9062AA_M_ALARM_SHIFT 1
+#define DA9062AA_M_ALARM_MASK BIT(1)
+#define DA9062AA_M_TICK_SHIFT 2
+#define DA9062AA_M_TICK_MASK BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT 3
+#define DA9062AA_M_WDG_WARN_MASK BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT 4
+#define DA9062AA_M_SEQ_RDY_MASK BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT 1
+#define DA9062AA_M_TEMP_MASK BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT 3
+#define DA9062AA_M_LDO_LIM_MASK BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT 5
+#define DA9062AA_M_DVC_RDY_MASK BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT 7
+#define DA9062AA_M_VDD_WARN_MASK BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT 0
+#define DA9062AA_M_GPI0_MASK 0x01
+#define DA9062AA_M_GPI1_SHIFT 1
+#define DA9062AA_M_GPI1_MASK BIT(1)
+#define DA9062AA_M_GPI2_SHIFT 2
+#define DA9062AA_M_GPI2_MASK BIT(2)
+#define DA9062AA_M_GPI3_SHIFT 3
+#define DA9062AA_M_GPI3_MASK BIT(3)
+#define DA9062AA_M_GPI4_SHIFT 4
+#define DA9062AA_M_GPI4_MASK BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT 0
+#define DA9062AA_SYSTEM_EN_MASK 0x01
+#define DA9062AA_POWER_EN_SHIFT 1
+#define DA9062AA_POWER_EN_MASK BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT 2
+#define DA9062AA_POWER1_EN_MASK BIT(2)
+#define DA9062AA_STANDBY_SHIFT 3
+#define DA9062AA_STANDBY_MASK BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT 4
+#define DA9062AA_M_SYSTEM_EN_MASK BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT 5
+#define DA9062AA_M_POWER_EN_MASK BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT 6
+#define DA9062AA_M_POWER1_EN_MASK BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT 1
+#define DA9062AA_WATCHDOG_PD_MASK BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT 2
+#define DA9062AA_FREEZE_EN_MASK BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT 3
+#define DA9062AA_NRES_MODE_MASK BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT 4
+#define DA9062AA_NONKEY_LOCK_MASK BIT(4)
+#define DA9062AA_NFREEZE_SHIFT 5
+#define DA9062AA_NFREEZE_MASK (0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT 7
+#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT 0
+#define DA9062AA_DEBOUNCING_MASK 0x07
+#define DA9062AA_AUTO_BOOT_SHIFT 3
+#define DA9062AA_AUTO_BOOT_MASK BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT 4
+#define DA9062AA_OTPREAD_EN_MASK BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT 5
+#define DA9062AA_SLEW_RATE_MASK (0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT 7
+#define DA9062AA_DEF_SUPPLY_MASK BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT 0
+#define DA9062AA_TWDSCALE_MASK 0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT 0
+#define DA9062AA_RTC_MODE_PD_MASK 0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT 1
+#define DA9062AA_RTC_MODE_SD_MASK BIT(1)
+#define DA9062AA_RTC_EN_SHIFT 2
+#define DA9062AA_RTC_EN_MASK BIT(2)
+#define DA9062AA_V_LOCK_SHIFT 7
+#define DA9062AA_V_LOCK_MASK BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT 0
+#define DA9062AA_WATCHDOG_MASK 0x01
+#define DA9062AA_SHUTDOWN_SHIFT 1
+#define DA9062AA_SHUTDOWN_MASK BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT 2
+#define DA9062AA_WAKE_UP_MASK BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT 0
+#define DA9062AA_GPI_DIS_MASK 0x01
+#define DA9062AA_PMIF_DIS_SHIFT 2
+#define DA9062AA_PMIF_DIS_MASK BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT 4
+#define DA9062AA_CLDR_PAUSE_MASK BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT 5
+#define DA9062AA_BBAT_DIS_MASK BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT 6
+#define DA9062AA_OUT32K_PAUSE_MASK BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT 7
+#define DA9062AA_PMCONT_DIS_MASK BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT 0
+#define DA9062AA_GPIO0_PIN_MASK 0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT 2
+#define DA9062AA_GPIO0_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT 3
+#define DA9062AA_GPIO0_WEN_MASK BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT 4
+#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT 6
+#define DA9062AA_GPIO1_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT 7
+#define DA9062AA_GPIO1_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT 0
+#define DA9062AA_GPIO2_PIN_MASK 0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT 2
+#define DA9062AA_GPIO2_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT 3
+#define DA9062AA_GPIO2_WEN_MASK BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT 4
+#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT 6
+#define DA9062AA_GPIO3_TYPE_MASK BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT 7
+#define DA9062AA_GPIO3_WEN_MASK BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT 0
+#define DA9062AA_GPIO4_PIN_MASK 0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT 2
+#define DA9062AA_GPIO4_TYPE_MASK BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT 3
+#define DA9062AA_GPIO4_WEN_MASK BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT 0
+#define DA9062AA_GPIO0_MODE_MASK 0x01
+#define DA9062AA_GPIO1_MODE_SHIFT 1
+#define DA9062AA_GPIO1_MODE_MASK BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT 2
+#define DA9062AA_GPIO2_MODE_MASK BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT 3
+#define DA9062AA_GPIO3_MODE_MASK BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT 4
+#define DA9062AA_GPIO4_MODE_MASK BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT 0
+#define DA9062AA_GPIO0_OUT_MASK 0x07
+#define DA9062AA_GPIO1_OUT_SHIFT 3
+#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT 6
+#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT 0
+#define DA9062AA_GPIO3_OUT_MASK 0x07
+#define DA9062AA_GPIO4_OUT_SHIFT 3
+#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT 0
+#define DA9062AA_BUCK2_EN_MASK 0x01
+#define DA9062AA_BUCK2_GPI_SHIFT 1
+#define DA9062AA_BUCK2_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT 3
+#define DA9062AA_BUCK2_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT 5
+#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT 0
+#define DA9062AA_BUCK1_EN_MASK 0x01
+#define DA9062AA_BUCK1_GPI_SHIFT 1
+#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT 3
+#define DA9062AA_BUCK1_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT 5
+#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT 0
+#define DA9062AA_BUCK4_EN_MASK 0x01
+#define DA9062AA_BUCK4_GPI_SHIFT 1
+#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT 3
+#define DA9062AA_BUCK4_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT 5
+#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT 0
+#define DA9062AA_BUCK3_EN_MASK 0x01
+#define DA9062AA_BUCK3_GPI_SHIFT 1
+#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT 3
+#define DA9062AA_BUCK3_CONF_MASK BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT 5
+#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT 0
+#define DA9062AA_LDO1_EN_MASK 0x01
+#define DA9062AA_LDO1_GPI_SHIFT 1
+#define DA9062AA_LDO1_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT 3
+#define DA9062AA_LDO1_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT 5
+#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT 7
+#define DA9062AA_LDO1_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT 0
+#define DA9062AA_LDO2_EN_MASK 0x01
+#define DA9062AA_LDO2_GPI_SHIFT 1
+#define DA9062AA_LDO2_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT 3
+#define DA9062AA_LDO2_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT 5
+#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT 7
+#define DA9062AA_LDO2_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT 0
+#define DA9062AA_LDO3_EN_MASK 0x01
+#define DA9062AA_LDO3_GPI_SHIFT 1
+#define DA9062AA_LDO3_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT 3
+#define DA9062AA_LDO3_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT 5
+#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT 7
+#define DA9062AA_LDO3_CONF_MASK BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT 0
+#define DA9062AA_LDO4_EN_MASK 0x01
+#define DA9062AA_LDO4_GPI_SHIFT 1
+#define DA9062AA_LDO4_GPI_MASK (0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT 3
+#define DA9062AA_LDO4_PD_DIS_MASK BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT 5
+#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT 7
+#define DA9062AA_LDO4_CONF_MASK BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT 0
+#define DA9062AA_VBUCK1_SEL_MASK 0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT 1
+#define DA9062AA_VBUCK2_SEL_MASK BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT 2
+#define DA9062AA_VBUCK4_SEL_MASK BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT 3
+#define DA9062AA_VBUCK3_SEL_MASK BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT 4
+#define DA9062AA_VLDO1_SEL_MASK BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT 5
+#define DA9062AA_VLDO2_SEL_MASK BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT 6
+#define DA9062AA_VLDO3_SEL_MASK BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT 7
+#define DA9062AA_VLDO4_SEL_MASK BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT 0
+#define DA9062AA_COUNT_SEC_MASK 0x3f
+#define DA9062AA_RTC_READ_SHIFT 7
+#define DA9062AA_RTC_READ_MASK BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT 0
+#define DA9062AA_COUNT_MIN_MASK 0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT 0
+#define DA9062AA_COUNT_HOUR_MASK 0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT 0
+#define DA9062AA_COUNT_DAY_MASK 0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT 0
+#define DA9062AA_COUNT_MONTH_MASK 0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT 0
+#define DA9062AA_COUNT_YEAR_MASK 0x3f
+#define DA9062AA_MONITOR_SHIFT 6
+#define DA9062AA_MONITOR_MASK BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT 0
+#define DA9062AA_ALARM_SEC_MASK 0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT 6
+#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT 0
+#define DA9062AA_ALARM_MIN_MASK 0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT 0
+#define DA9062AA_ALARM_HOUR_MASK 0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT 0
+#define DA9062AA_ALARM_DAY_MASK 0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT 0
+#define DA9062AA_ALARM_MONTH_MASK 0x0f
+#define DA9062AA_TICK_TYPE_SHIFT 4
+#define DA9062AA_TICK_TYPE_MASK BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT 5
+#define DA9062AA_TICK_WAKE_MASK BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT 0
+#define DA9062AA_ALARM_YEAR_MASK 0x3f
+#define DA9062AA_ALARM_ON_SHIFT 6
+#define DA9062AA_ALARM_ON_MASK BIT(6)
+#define DA9062AA_TICK_ON_SHIFT 7
+#define DA9062AA_TICK_ON_MASK BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT 0
+#define DA9062AA_SECONDS_A_MASK 0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT 0
+#define DA9062AA_SECONDS_B_MASK 0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT 0
+#define DA9062AA_SECONDS_C_MASK 0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT 0
+#define DA9062AA_SECONDS_D_MASK 0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT 0
+#define DA9062AA_SEQ_POINTER_MASK 0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT 4
+#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT 0
+#define DA9062AA_SEQ_TIME_MASK 0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT 4
+#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT 0
+#define DA9062AA_LDO1_STEP_MASK 0x0f
+#define DA9062AA_LDO2_STEP_SHIFT 4
+#define DA9062AA_LDO2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT 0
+#define DA9062AA_LDO3_STEP_MASK 0x0f
+#define DA9062AA_LDO4_STEP_SHIFT 4
+#define DA9062AA_LDO4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT 4
+#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT 0
+#define DA9062AA_BUCK1_STEP_MASK 0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT 4
+#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT 0
+#define DA9062AA_BUCK4_STEP_MASK 0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT 4
+#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT 0
+#define DA9062AA_GP_RISE1_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT 4
+#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT 0
+#define DA9062AA_GP_RISE2_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT 4
+#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT 0
+#define DA9062AA_GP_RISE3_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT 4
+#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT 0
+#define DA9062AA_GP_RISE4_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT 4
+#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT 0
+#define DA9062AA_GP_RISE5_STEP_MASK 0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT 4
+#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT 0
+#define DA9062AA_WAIT_STEP_MASK 0x0f
+#define DA9062AA_EN32K_STEP_SHIFT 4
+#define DA9062AA_EN32K_STEP_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT 0
+#define DA9062AA_SYSTEM_END_MASK 0x0f
+#define DA9062AA_POWER_END_SHIFT 4
+#define DA9062AA_POWER_END_MASK (0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT 0
+#define DA9062AA_MAX_COUNT_MASK 0x0f
+#define DA9062AA_PART_DOWN_SHIFT 4
+#define DA9062AA_PART_DOWN_MASK (0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT 0
+#define DA9062AA_WAIT_TIME_MASK 0x0f
+#define DA9062AA_WAIT_MODE_SHIFT 4
+#define DA9062AA_WAIT_MODE_MASK BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT 5
+#define DA9062AA_TIME_OUT_MASK BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT 6
+#define DA9062AA_WAIT_DIR_MASK (0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT 0
+#define DA9062AA_STABILISATION_TIME_MASK 0x07
+#define DA9062AA_CRYSTAL_SHIFT 3
+#define DA9062AA_CRYSTAL_MASK BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT 4
+#define DA9062AA_DELAY_MODE_MASK BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT 5
+#define DA9062AA_OUT_CLOCK_MASK BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT 6
+#define DA9062AA_RTC_CLOCK_MASK BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT 7
+#define DA9062AA_EN_32KOUT_MASK BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT 0
+#define DA9062AA_RESET_TIMER_MASK 0x3f
+#define DA9062AA_RESET_EVENT_SHIFT 6
+#define DA9062AA_RESET_EVENT_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT 0
+#define DA9062AA_BUCK3_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT 0
+#define DA9062AA_BUCK4_ILIM_MASK 0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT 0
+#define DA9062AA_BUCK1_ILIM_MASK 0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT 4
+#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT 6
+#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT 6
+#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT 3
+#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT 4
+#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT 6
+#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT 5
+#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT 6
+#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT 0
+#define DA9062AA_VBUCK2_A_MASK 0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT 7
+#define DA9062AA_BUCK2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT 0
+#define DA9062AA_VBUCK1_A_MASK 0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT 7
+#define DA9062AA_BUCK1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT 0
+#define DA9062AA_VBUCK4_A_MASK 0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT 7
+#define DA9062AA_BUCK4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT 0
+#define DA9062AA_VBUCK3_A_MASK 0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT 7
+#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT 0
+#define DA9062AA_VLDO1_A_MASK 0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT 7
+#define DA9062AA_LDO1_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT 0
+#define DA9062AA_VLDO2_A_MASK 0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT 7
+#define DA9062AA_LDO2_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT 0
+#define DA9062AA_VLDO3_A_MASK 0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT 7
+#define DA9062AA_LDO3_SL_A_MASK BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT 0
+#define DA9062AA_VLDO4_A_MASK 0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT 7
+#define DA9062AA_LDO4_SL_A_MASK BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT 0
+#define DA9062AA_VBUCK2_B_MASK 0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT 7
+#define DA9062AA_BUCK2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT 0
+#define DA9062AA_VBUCK1_B_MASK 0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT 7
+#define DA9062AA_BUCK1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT 0
+#define DA9062AA_VBUCK4_B_MASK 0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT 7
+#define DA9062AA_BUCK4_SL_B_MASK BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT 0
+#define DA9062AA_VBUCK3_B_MASK 0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT 7
+#define DA9062AA_BUCK3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT 0
+#define DA9062AA_VLDO1_B_MASK 0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT 7
+#define DA9062AA_LDO1_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT 0
+#define DA9062AA_VLDO2_B_MASK 0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT 7
+#define DA9062AA_LDO2_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT 0
+#define DA9062AA_VLDO3_B_MASK 0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT 7
+#define DA9062AA_LDO3_SL_B_MASK BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT 0
+#define DA9062AA_VLDO4_B_MASK 0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT 7
+#define DA9062AA_LDO4_SL_B_MASK BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT 0
+#define DA9062AA_BCHG_VSET_MASK 0x0f
+#define DA9062AA_BCHG_ISET_SHIFT 4
+#define DA9062AA_BCHG_ISET_MASK (0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT 4
+#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT 0
+#define DA9062AA_PM_I_V_MASK 0x01
+#define DA9062AA_PM_O_TYPE_SHIFT 2
+#define DA9062AA_PM_O_TYPE_MASK BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT 3
+#define DA9062AA_IRQ_TYPE_MASK BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT 4
+#define DA9062AA_PM_IF_V_MASK BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT 5
+#define DA9062AA_PM_IF_FMP_MASK BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT 6
+#define DA9062AA_PM_IF_HSM_MASK BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0
+#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT 4
+#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT 3
+#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT 4
+#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT 6
+#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT 0
+#define DA9062AA_GPI_V_MASK 0x01
+#define DA9062AA_NIRQ_MODE_SHIFT 1
+#define DA9062AA_NIRQ_MODE_MASK BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT 2
+#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT 5
+#define DA9062AA_FORCE_RESET_MASK BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT 0
+#define DA9062AA_BUCK1_AUTO_MASK 0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT 1
+#define DA9062AA_BUCK2_AUTO_MASK BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT 2
+#define DA9062AA_BUCK4_AUTO_MASK BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT 4
+#define DA9062AA_BUCK3_AUTO_MASK BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT 0
+#define DA9062AA_LDO1_AUTO_MASK 0x01
+#define DA9062AA_LDO2_AUTO_SHIFT 1
+#define DA9062AA_LDO2_AUTO_MASK BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT 2
+#define DA9062AA_LDO3_AUTO_MASK BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT 3
+#define DA9062AA_LDO4_AUTO_MASK BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT 3
+#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT 5
+#define DA9062AA_BUCK2_OD_MASK BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT 6
+#define DA9062AA_BUCK1_OD_MASK BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT 0
+#define DA9062AA_NONKEY_PIN_MASK 0x03
+#define DA9062AA_nONKEY_SD_SHIFT 2
+#define DA9062AA_nONKEY_SD_MASK BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT 3
+#define DA9062AA_WATCHDOG_SD_MASK BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT 4
+#define DA9062AA_KEY_SD_MODE_MASK BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT 5
+#define DA9062AA_HOST_SD_MODE_MASK BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT 6
+#define DA9062AA_INT_SD_MODE_MASK BIT(6)
+#define DA9062AA_LDO_SD_SHIFT 7
+#define DA9062AA_LDO_SD_MASK BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT 0
+#define DA9062AA_KEY_DELAY_MASK 0x03
+#define DA9062AA_SHUT_DELAY_SHIFT 2
+#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT 4
+#define DA9062AA_RESET_DURATION_MASK (0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT 6
+#define DA9062AA_TWOWIRE_TO_MASK BIT(6)
+#define DA9062AA_IF_RESET_SHIFT 7
+#define DA9062AA_IF_RESET_MASK BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT 0
+#define DA9062AA_GPIO0_PUPD_MASK 0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT 1
+#define DA9062AA_GPIO1_PUPD_MASK BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT 2
+#define DA9062AA_GPIO2_PUPD_MASK BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT 3
+#define DA9062AA_GPIO3_PUPD_MASK BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT 4
+#define DA9062AA_GPIO4_PUPD_MASK BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT 1
+#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT 3
+#define DA9062AA_WDG_MODE_MASK BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT 4
+#define DA9062AA_OSC_FRQ_MASK (0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT 0
+#define DA9062AA_TRIM_CLDR_MASK 0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT 0
+#define DA9062AA_GP_0_MASK 0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT 0
+#define DA9062AA_GP_1_MASK 0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT 0
+#define DA9062AA_GP_2_MASK 0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT 0
+#define DA9062AA_GP_3_MASK 0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT 0
+#define DA9062AA_GP_4_MASK 0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT 0
+#define DA9062AA_GP_5_MASK 0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT 0
+#define DA9062AA_GP_6_MASK 0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT 0
+#define DA9062AA_GP_7_MASK 0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT 0
+#define DA9062AA_GP_8_MASK 0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT 0
+#define DA9062AA_GP_9_MASK 0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT 0
+#define DA9062AA_GP_10_MASK 0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT 0
+#define DA9062AA_GP_11_MASK 0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT 0
+#define DA9062AA_GP_12_MASK 0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT 0
+#define DA9062AA_GP_13_MASK 0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT 0
+#define DA9062AA_GP_14_MASK 0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT 0
+#define DA9062AA_GP_15_MASK 0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT 0
+#define DA9062AA_GP_16_MASK 0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT 0
+#define DA9062AA_GP_17_MASK 0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT 0
+#define DA9062AA_GP_18_MASK 0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT 0
+#define DA9062AA_GP_19_MASK 0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT 0
+#define DA9062AA_DEV_ID_MASK 0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT 0
+#define DA9062AA_VRC_MASK 0x0f
+#define DA9062AA_MRC_SHIFT 4
+#define DA9062AA_MRC_MASK (0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT 0
+#define DA9062AA_CUST_ID_MASK 0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT 0
+#define DA9062AA_CONFIG_REV_MASK 0xff
+
+#endif /* __DA9062_H__ */
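The SHIFT/MASK pairs above are meant to be used in the usual way: mask the raw register value, then shift the field down to bit 0. A minimal sketch of a reader for the variant code, assuming the register offsets (DA9062AA_VARIANT_ID and friends) come from the same header; the helper name and include path are illustrative only, not part of the patch:

#include <linux/regmap.h>
#include <linux/mfd/da9062/registers.h>	/* assumed location of the defines above */

/* Illustrative helper: extract the 4-bit variant code (VRC) field. */
static int da9062_read_variant_code(struct regmap *regmap, unsigned int *vrc)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, DA9062AA_VARIANT_ID, &val);
	if (ret)
		return ret;

	*vrc = (val & DA9062AA_VRC_MASK) >> DA9062AA_VRC_SHIFT;
	return 0;
}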
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d822b..621af8212 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@ enum da9063_irqs {
DA9063_IRQ_COMP_1V2,
DA9063_IRQ_LDO_LIM,
DA9063_IRQ_REG_UVOV,
+ DA9063_IRQ_DVC_RDY,
DA9063_IRQ_VDD_MON,
DA9063_IRQ_WARN,
DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac782f..2b300b44f 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-/* Watchdog resources */
-#define ICH_RES_IO_TCO 0
-#define ICH_RES_IO_SMI 1
-#define ICH_RES_MEM_OFF 2
-#define ICH_RES_MEM_GCS_PMC 0
-
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000..095b121aa
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
+/*
+ * Common data shared between Maxim 77693 and 77843 drivers
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_COMMON_H
+#define __LINUX_MFD_MAX77693_COMMON_H
+
+enum max77693_types {
+ TYPE_MAX77693_UNKNOWN,
+ TYPE_MAX77693,
+ TYPE_MAX77843,
+
+ TYPE_MAX77693_NUM,
+};
+
+/*
+ * Also shared with max77843.
+ */
+struct max77693_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
+ struct i2c_client *i2c_muic; /* 0x4A , MUIC */
+ struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */
+ struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
+
+ enum max77693_types type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic; /* Only MAX77693 */
+ struct regmap *regmap_chg; /* Only MAX77843 */
+
+ struct regmap_irq_chip_data *irq_data_led;
+ struct regmap_irq_chip_data *irq_data_topsys;
+ struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+
+
+#endif /* __LINUX_MFD_MAX77693_COMMON_H */
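Child drivers that serve both chips are expected to branch on the new type field when a regmap only exists on one of them; the comments in the struct above spell out which is which. A minimal sketch under that assumption (the helper itself is illustrative, not part of the patch):

#include <linux/mfd/max77693-common.h>

/* Illustrative: return the regmap that talks to the charger block. */
static struct regmap *max77693_charger_regmap(struct max77693_dev *max77693)
{
	switch (max77693->type) {
	case TYPE_MAX77693:
		return max77693->regmap;	/* charger sits on the main PMIC client */
	case TYPE_MAX77843:
		return max77693->regmap_chg;	/* dedicated charger client */
	default:
		return NULL;
	}
}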
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea6f..3c7a63b98 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+#define MAX77693_STATUS1_ADC_SHIFT 0
+#define MAX77693_STATUS1_ADCLOW_SHIFT 5
+#define MAX77693_STATUS1_ADCERR_SHIFT 6
+#define MAX77693_STATUS1_ADC1K_SHIFT 7
+#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
+#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
+#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
+#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
+
+#define MAX77693_STATUS2_CHGTYP_SHIFT 0
+#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77693_STATUS2_DCDTMR_SHIFT 4
+#define MAX77693_STATUS2_DXOVP_SHIFT 5
+#define MAX77693_STATUS2_VBVOLT_SHIFT 6
+#define MAX77693_STATUS2_VIDRM_SHIFT 7
+#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
+#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
+#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
+#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
+#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
+
+#define MAX77693_STATUS3_OVP_SHIFT 2
+#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
/* MAX77693 CDETCTRL1~2 register */
#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
| (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
| (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
| (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
| (0 << COMN1SW_SHIFT))
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77693_CONTROL2_ADCEN_SHIFT 1
+#define MAX77693_CONTROL2_CPEN_SHIFT 2
+#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
+#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
+#define MAX77693_CONTROL2_ACCDET_SHIFT 5
+#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77693_CONTROL2_RCPS_SHIFT 7
+#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
+#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
+#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
+#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
+#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
+#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
+#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
+#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
+
+#define MAX77693_CONTROL3_JIGSET_SHIFT 0
+#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
+#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
+#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
+#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
+#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
/* Slave addr = 0x90: Haptic */
enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
MAX77693_MUIC_IRQ_NR,
};
-struct max77693_dev {
- struct device *dev;
- struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
- struct i2c_client *muic; /* 0x4A , MUIC */
- struct i2c_client *haptic; /* 0x90 , Haptic */
-
- int type;
-
- struct regmap *regmap;
- struct regmap *regmap_muic;
- struct regmap *regmap_haptic;
-
- struct regmap_irq_chip_data *irq_data_led;
- struct regmap_irq_chip_data *irq_data_topsys;
- struct regmap_irq_chip_data *irq_data_charger;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
- int irq_gpio;
- struct mutex irqlock;
- int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
- int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
-};
-
-enum max77693_types {
- TYPE_MAX77693,
-};
-
-extern int max77693_irq_init(struct max77693_dev *max77686);
-extern void max77693_irq_exit(struct max77693_dev *max77686);
-extern int max77693_irq_resume(struct max77693_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace83..c19303b0c 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
/* MAX77843 STATUS register*/
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCERROR_SHIFT 6
-#define STATUS1_ADC1K_SHIFT 7
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DXOVP_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS3_VBADC_SHIFT 0
-#define STATUS3_VDNMON_SHIFT 4
-#define STATUS3_DNRES_SHIFT 5
-#define STATUS3_MPNACK_SHIFT 6
-
-#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
-#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
-#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
-#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
-#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
-#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
-#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
-#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
+#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
+#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
+#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
+#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
+#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
+#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
+#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
+#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
+#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
+#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
/* MAX77843 CONTROL register */
-#define CONTROL1_COMP1SW_SHIFT 0
-#define CONTROL1_COMP2SW_SHIFT 3
-#define CONTROL1_IDBEN_SHIFT 7
-#define CONTROL2_LOWPWR_SHIFT 0
-#define CONTROL2_ADCEN_SHIFT 1
-#define CONTROL2_CPEN_SHIFT 2
-#define CONTROL2_ACC_DET_SHIFT 5
-#define CONTROL2_USBCPINT_SHIFT 6
-#define CONTROL2_RCPS_SHIFT 7
-#define CONTROL3_JIGSET_SHIFT 0
-#define CONTROL4_ADCDBSET_SHIFT 0
-#define CONTROL4_USBAUTO_SHIFT 4
-#define CONTROL4_FCTAUTO_SHIFT 5
-#define CONTROL4_ADCMODE_SHIFT 6
-
-#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
-#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
-#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
-#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
+#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
+#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
+#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
+#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
+#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
+#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
+#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
+#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
+#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
+#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
/* MAX77843 switch port */
#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
#define COM_AUX_USB 4
#define COM_AUX_UART 5
-#define CONTROL1_COM_SW \
+#define MAX77843_MUIC_CONTROL1_COM_SW \
((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
-#define CONTROL1_SW_OPEN \
- ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
- COM_OPEN << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_USB \
- ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUDIO \
- ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
- COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_UART \
- ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_UART << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_USB \
- ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_UART \
- ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
- COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_OPEN \
+ ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_USB \
+ ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_UART \
+ ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
#define MAX77843_DISABLE 0
#define MAX77843_ENABLE 1
#define CONTROL4_AUTO_DISABLE \
- ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
#define CONTROL4_AUTO_ENABLE \
- ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
- (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+ ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
/* MAX77843 SAFEOUT LDO Control register */
#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
(0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
-struct max77843 {
- struct device *dev;
-
- struct i2c_client *i2c;
- struct i2c_client *i2c_chg;
- struct i2c_client *i2c_fuel;
- struct i2c_client *i2c_muic;
-
- struct regmap *regmap;
- struct regmap *regmap_chg;
- struct regmap *regmap_fuel;
- struct regmap *regmap_muic;
-
- struct regmap_irq_chip_data *irq_data;
- struct regmap_irq_chip_data *irq_data_chg;
- struct regmap_irq_chip_data *irq_data_fuel;
- struct regmap_irq_chip_data *irq_data_muic;
-
- int irq;
-};
#endif /* __MAX77843_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b0d..45b8e8aa1 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@ struct mt6397_chip {
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
+ u16 wake_mask[2];
u16 irq_masks_cur[2];
u16 irq_masks_cache[2];
};
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index bb270bd03..13e1d9693 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/extcon.h>
+#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -551,10 +552,16 @@ struct palmas_usb {
int vbus_otg_irq;
int vbus_irq;
+ int gpio_id_irq;
+ struct gpio_desc *id_gpiod;
+ unsigned long sw_debounce_jiffies;
+ struct delayed_work wq_detectid;
+
enum palmas_usb_state linkstat;
int wakeup;
bool enable_vbus_detection;
bool enable_id_detection;
+ bool enable_gpio_id_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c82c..558a485d0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+/* For imx6ul iomux gpr register field define */
+#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
+#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
+#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
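These GPR1 field bits are normally applied through the IOMUXC-GPR syscon regmap, with the *_CLK_DIR value as the mask and the *_CLK_OUTPUT value as the setting. A minimal sketch, assuming the caller has already looked up the syscon regmap and that IOMUXC_GPR1 is the register offset defined earlier in this header:

#include <linux/regmap.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

/* Illustrative: drive both ENET reference clocks as outputs on i.MX6UL. */
static int imx6ul_enet_clks_out(struct regmap *gpr)
{
	return regmap_update_bits(gpr, IOMUXC_GPR1,
				  IMX6UL_GPR1_ENET_CLK_DIR,
				  IMX6UL_GPR1_ENET_CLK_OUTPUT);
}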
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
new file mode 100644
index 000000000..eb492d47f
--- /dev/null
+++ b/include/linux/microchipphy.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MICROCHIPPHY_H
+#define _MICROCHIPPHY_H
+
+#define LAN88XX_INT_MASK (0x19)
+#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
+#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
+
+#define LAN88XX_INT_STS (0x1A)
+#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
+#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
+#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
+#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
+#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
+#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
+#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
+#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
+#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
+#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
+#define LAN88XX_INT_STS_RESERVED_ (0x0010)
+#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
+#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define LAN88XX_INT_STS_RX_ER_ (0x0001)
+
+#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
+#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
+#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
+#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define LAN88XX_EXT_MODE_CTRL (0x13)
+#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
+#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+/* MMD 3 Registers */
+#define LAN88XX_MMD3_CHIP_ID (32877)
+#define LAN88XX_MMD3_CHIP_REV (32878)
+
+#endif /* _MICROCHIPPHY_H */
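A PHY driver would reach these registers through the generic MDIO accessors; reading LAN88XX_INT_STS is typically what acknowledges pending interrupts. A minimal sketch (the function is illustrative, not the driver's actual callback):

#include <linux/phy.h>
#include <linux/microchipphy.h>

/* Illustrative: ack anything pending, then unmask link-change interrupts. */
static int lan88xx_example_config_intr(struct phy_device *phydev)
{
	int rc;

	rc = phy_read(phydev, LAN88XX_INT_STS);	/* read clears latched bits */
	if (rc < 0)
		return rc;

	return phy_write(phydev, LAN88XX_INT_MASK,
			 LAN88XX_INT_MASK_MDINTPIN_EN_ |
			 LAN88XX_INT_MASK_LINK_CHANGE_);
}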
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 819077c32..81f6e427b 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -67,7 +67,7 @@ struct miscdevice {
};
extern int misc_register(struct miscdevice *misc);
-extern int misc_deregister(struct miscdevice *misc);
+extern void misc_deregister(struct miscdevice *misc);
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1..09cebe528 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
- MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
MLX4_CQE_L2_TUNNEL = 1 << 27,
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3..baad4cb8e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -79,7 +79,8 @@ enum {
enum {
MLX4_MAX_PORTS = 2,
- MLX4_MAX_PORT_PKEYS = 128
+ MLX4_MAX_PORT_PKEYS = 128,
+ MLX4_MAX_PORT_GIDS = 128
};
/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -211,6 +212,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+ MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
};
enum {
@@ -581,6 +584,7 @@ struct mlx4_caps {
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 phv_bit[MLX4_MAX_PORTS + 1];
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1336,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 9553a73d2..5a06d9693 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -59,6 +59,7 @@ struct mlx4_interface {
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
+ void (*activate)(struct mlx4_dev *dev, void *context);
struct list_head list;
enum mlx4_protocol protocol;
int flags;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5..de45a51b3 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
- MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+ MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2..250b1ff8b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@ enum {
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2..8b6d6f215 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@ enum {
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
+ MLX5_REG_PPCNT = 0x5008,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
};
enum mlx5_port_status {
- MLX5_PORT_UP = 1 << 1,
- MLX5_PORT_DOWN = 1 << 2,
+ MLX5_PORT_UP = 1,
+ MLX5_PORT_DOWN = 2,
};
struct mlx5_uuar_info {
@@ -380,7 +382,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
- void __iomem *wc_map;
+ void __iomem *bf_map;
void __iomem *map;
};
@@ -435,6 +437,8 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+ struct io_mapping *bf_mapping;
+
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -463,6 +467,10 @@ struct mlx5_priv {
/* end: mr stuff */
/* start: alloc stuff */
+ /* protect buffer allocation according to numa node */
+ struct mutex alloc_mutex;
+ int numa_node;
+
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc stuff */
@@ -672,6 +680,8 @@ void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
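The pause helpers follow the usual query/set pairing declared just above. A minimal sketch of a caller that enables symmetric flow control only when it is not already on (the wrapper is illustrative):

#include <linux/mlx5/driver.h>

/* Illustrative: turn on RX and TX pause if either is currently off. */
static int example_enable_pause(struct mlx5_core_dev *mdev)
{
	u32 rx_pause, tx_pause;
	int err;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
	if (err)
		return err;

	if (rx_pause && tx_pause)
		return 0;

	return mlx5_set_port_pause(mdev, 1, 1);
}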
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee0..dd2097455 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
};
enum {
- MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
- MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
};
enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
struct mlx5_ifc_tisc_bits ctx;
};
+struct mlx5_ifc_modify_tir_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lro[0x1];
+};
+
struct mlx5_ifc_modify_tir_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_rqt_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 rqn_list[0x1];
+};
+
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_rqt_bitmask_bits bitmask;
u8 reserved_4[0x40];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6293566d7..79f680101 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -20,6 +20,7 @@
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
+#include <linux/err.h>
struct mempolicy;
struct anon_vma;
@@ -124,8 +125,10 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -245,7 +248,10 @@ struct vm_fault {
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
+ int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+ pmd_t *, unsigned int flags);
void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* notification that a previously read-only page is about to become
@@ -304,18 +310,6 @@ struct inode;
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
-/* It's valid only if the page is free path or free_list */
-static inline void set_freepage_migratetype(struct page *page, int migratetype)
-{
- page->index = migratetype;
-}
-
-/* It's valid only if the page is free path or free_list */
-static inline int get_freepage_migratetype(struct page *page)
-{
- return page->index;
-}
-
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
@@ -356,20 +350,15 @@ static inline int get_page_unless_zero(struct page *page)
return atomic_inc_not_zero(&page->_count);
}
-/*
- * Try to drop a ref unless the page has a refcount of one, return false if
- * that is the case.
- * This is to make sure that the refcount won't become zero after this drop.
- * This can be called when MMU is off so it must not access
- * any of the virtual mappings.
- */
-static inline int put_page_unless_one(struct page *page)
-{
- return atomic_add_unless(&page->_count, -1, 1);
-}
-
extern int page_is_ram(unsigned long pfn);
-extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
+
+enum {
+ REGION_INTERSECTS,
+ REGION_DISJOINT,
+ REGION_MIXED,
+};
+
+int region_intersects(resource_size_t offset, size_t size, const char *type);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
@@ -1269,6 +1258,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+
+/* Container for pinned pfns / pages */
+struct frame_vector {
+ unsigned int nr_allocated; /* Number of frames we have space for */
+ unsigned int nr_frames; /* Number of frames stored in ptrs array */
+ bool got_ref; /* Did we pin pages by getting page ref? */
+ bool is_pfns; /* Does array contain pages or pfns? */
+ void *ptrs[0]; /* Array of pinned pfns / pages. Use
+ * frame_vector_pages() or frame_vector_pfns()
+ * for access */

+};
+
+struct frame_vector *frame_vector_create(unsigned int nr_frames);
+void frame_vector_destroy(struct frame_vector *vec);
+int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
+ bool write, bool force, struct frame_vector *vec);
+void put_vaddr_frames(struct frame_vector *vec);
+int frame_vector_to_pages(struct frame_vector *vec);
+void frame_vector_to_pfns(struct frame_vector *vec);
+
+static inline unsigned int frame_vector_count(struct frame_vector *vec)
+{
+ return vec->nr_frames;
+}
+
+static inline struct page **frame_vector_pages(struct frame_vector *vec)
+{
+ if (vec->is_pfns) {
+ int err = frame_vector_to_pages(vec);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+ return (struct page **)(vec->ptrs);
+}
+
+static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
+{
+ if (!vec->is_pfns)
+ frame_vector_to_pfns(vec);
+ return (unsigned long *)(vec->ptrs);
+}
+
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
@@ -1300,6 +1332,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
static inline int stack_guard_page_start(struct vm_area_struct *vma,
unsigned long addr)
{
@@ -1876,7 +1913,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *);
+ struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1923,11 +1960,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+static inline unsigned long
+do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot, unsigned long flags,
+ unsigned long pgoff, unsigned long *populate)
+{
+ return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
+}
+
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
@@ -2227,6 +2272,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
+extern void put_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
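For illustration, a minimal sketch of the frame_vector API added above, which replaces open-coded get_user_pages() bookkeeping for drivers that pin a contiguous range of user memory. The wrapper pin_user_range() is hypothetical, and get_vaddr_frames() is assumed to return the number of frames actually mapped on success:

	#include <linux/mm.h>
	#include <linux/err.h>

	static int pin_user_range(unsigned long start, unsigned int nr_frames)
	{
		struct frame_vector *vec;
		struct page **pages;
		int ret;

		vec = frame_vector_create(nr_frames);
		if (!vec)
			return -ENOMEM;

		/* write = true, force = false: pin for read/write access */
		ret = get_vaddr_frames(start, nr_frames, true, false, vec);
		if (ret < 0)
			goto out_destroy;
		if (ret != nr_frames) {		/* only part of the range was mappable */
			ret = -EFAULT;
			goto out_put;
		}

		pages = frame_vector_pages(vec);
		if (IS_ERR(pages)) {
			/* Range contained pure PFN mappings; no struct pages here. */
			ret = PTR_ERR(pages);
			goto out_put;
		}

		/* ... use pages[0 .. frame_vector_count(vec) - 1] ... */
		ret = 0;

	out_put:
		put_vaddr_frames(vec);
	out_destroy:
		frame_vector_destroy(vec);
		return ret;
	}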
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 995435f0d..3173639bd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -215,6 +215,9 @@ struct page_frag {
__u16 offset;
__u16 size;
#endif
+#ifdef CONFIG_UKSM
+ struct vma_slot *uksm_vma_slot;
+#endif
};
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
@@ -235,7 +238,7 @@ struct page_frag_cache {
bool pfmemalloc;
};
-typedef unsigned long __nocast vm_flags_t;
+typedef unsigned long vm_flags_t;
/*
* A region containing a mapping of a non-memory backed file under NOMMU
@@ -257,6 +260,16 @@ struct vm_region {
* this region */
};
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+ struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
/*
* This struct defines a memory VMM memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
@@ -324,9 +337,7 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
-#ifdef CONFIG_UKSM
- struct vma_slot *uksm_vma_slot;
-#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
struct core_thread {
@@ -548,6 +559,7 @@ enum tlb_flush_reason {
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
+ TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d3776d25..fdd0779cc 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -279,10 +279,13 @@ struct mmc_card {
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
+#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
+ unsigned int eg_boundary; /* don't cross erase-group boundaries */
u8 erased_byte; /* value of erased bytes */
u32 raw_cid[4]; /* raw card CID */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 5be97676f..134c57422 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -98,6 +98,7 @@ struct mmc_data;
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
* @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @dto_timer: Timer for broken data transfer over scheme.
*
* Locking
* =======
@@ -153,11 +154,7 @@ struct dw_mci {
dma_addr_t sg_dma;
void *sg_cpu;
const struct dw_mci_dma_ops *dma_ops;
-#ifdef CONFIG_MMC_DW_IDMAC
unsigned int ring_size;
-#else
- struct dw_mci_dma_data *dma_data;
-#endif
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
@@ -204,6 +201,7 @@ struct dw_mci {
int sdio_id0;
struct timer_list cmd11_timer;
+ struct timer_list dto_timer;
};
/* DMA ops for Internal/External DMAC interface */
@@ -226,6 +224,8 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
+/* Timer for broken data transfer over scheme */
+#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
struct dma_pdata;
@@ -259,7 +259,6 @@ struct dw_mci_board {
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
- struct block_settings *blk_settings;
};
#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1369e54fa..83b81fd86 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -412,7 +412,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
host->sdio_irq_pending = true;
- wake_up_process(host->sdio_irq_thread);
+ if (host->sdio_irq_thread)
+ wake_up_process(host->sdio_irq_thread);
}
void sdio_run_irqs(struct mmc_host *host);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 61cd67f4d..a1a210d59 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -66,6 +66,16 @@ struct mmu_notifier_ops {
unsigned long end);
/*
+ * clear_young is a lightweight version of clear_flush_young. Like the
+ * latter, it is supposed to test-and-clear the young/accessed bitflag
+ * in the secondary pte, but it may omit flushing the secondary tlb.
+ */
+ int (*clear_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /*
* test_young is called to check the young/accessed bitflag in
* the secondary pte. This is used to know if the page is
* frequently used without actually clearing the flag or tearing
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
unsigned long end);
+extern int __mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline int mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_young(mm, start, end);
+ return 0;
+}
+
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define ptep_clear_young_notify(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PAGE_SIZE); \
+ __young; \
+})
+
+#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
+ __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
+ ___address + PMD_SIZE); \
+ __young; \
+})
+
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
unsigned long ___addr = __address & PAGE_MASK; \
@@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_young_notify ptep_test_and_clear_young
+#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4a1ba487a..47a9eae07 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -322,7 +322,11 @@ enum zone_type {
ZONE_HIGHMEM,
#endif
ZONE_MOVABLE,
+#ifdef CONFIG_ZONE_DEVICE
+ ZONE_DEVICE,
+#endif
__MAX_NR_ZONES
+
};
#ifndef __GENERATING_BOUNDS_H
@@ -693,14 +697,6 @@ struct zonelist {
#endif
};
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-struct node_active_region {
- unsigned long start_pfn;
- unsigned long end_pfn;
- int nid;
-};
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
@@ -797,6 +793,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
+static inline int zone_id(const struct zone *zone)
+{
+ struct pglist_data *pgdat = zone->zone_pgdat;
+
+ return zone - pgdat->node_zones;
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return zone_id(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return false;
+}
+#endif
+
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
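A small illustrative helper using the new zone test; it relies only on the is_dev_zone() stubs above and the usual page_zone() accessor:

	#include <linux/mm.h>
	#include <linux/mmzone.h>

	/* True when @page is backed by device memory rather than ordinary RAM.
	 * Compiles down to "return false" when CONFIG_ZONE_DEVICE is off. */
	static inline bool my_page_is_device_memory(const struct page *page)
	{
		return is_dev_zone(page_zone(page));
	}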
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 34f25b7bf..688997a24 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -253,7 +253,7 @@ struct pcmcia_device_id {
__u32 prod_id_hash[4];
- /* not matched against in kernelspace*/
+ /* not matched against in kernelspace */
const char * prod_id[4];
/* not matched against */
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000..ef29eb2d6
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68ff..ad939d0ba 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,38 +14,85 @@ extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
+struct pci_dev;
+struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
+ struct msi_msg *msg);
+
+/**
+ * platform_msi_desc - Platform device specific msi descriptor data
+ * @msi_priv_data: Pointer to platform private data
+ * @msi_index: The index of the MSI descriptor for multi MSI
+ */
+struct platform_msi_desc {
+ struct platform_msi_priv_data *msi_priv_data;
+ u16 msi_index;
+};
+
+/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list: List head for management
+ * @irq: The base interrupt number
+ * @nvec_used: The number of vectors used
+ * @dev: Pointer to the device which uses this descriptor
+ * @msg: The last set MSI message cached for reuse
+ *
+ * @masked: [PCI MSI/X] Mask bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
+ * @platform: [platform] Platform device specific msi descriptor data
+ */
struct msi_desc {
- struct {
- __u8 is_msix : 1;
- __u8 multiple: 3; /* log2 num of messages allocated */
- __u8 multi_cap : 3; /* log2 num of messages supported */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
- } msi_attrib;
-
- u32 masked; /* mask bits */
- unsigned int irq;
- unsigned int nvec_used; /* number of messages */
- struct list_head list;
+ /* Shared device/bus type independent data */
+ struct list_head list;
+ unsigned int irq;
+ unsigned int nvec_used;
+ struct device *dev;
+ struct msi_msg msg;
union {
- void __iomem *mask_base;
- u8 mask_pos;
- };
- struct pci_dev *dev;
+ /* PCI MSI/X specific data */
+ struct {
+ u32 masked;
+ struct {
+ __u8 is_msix : 1;
+ __u8 multiple : 3;
+ __u8 multi_cap : 3;
+ __u8 maskbit : 1;
+ __u8 is_64 : 1;
+ __u16 entry_nr;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
+ };
- /* Last set MSI message */
- struct msi_msg msg;
+ /*
+ * Non PCI variants add their data structure here. New
+ * entries need to use a named structure. We want
+ * proper name spaces for this. The PCI part is
+ * anonymous for now as it would require an immediate
+ * tree wide cleanup.
+ */
+ struct platform_msi_desc platform;
+ };
};
/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
-#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define msi_desc_to_dev(desc) ((desc)->dev)
+#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
@@ -56,12 +103,17 @@ struct msi_desc {
#define for_each_pci_msi_entry(desc, pdev) \
for_each_msi_entry((desc), &(pdev)->dev)
-static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
- return desc->dev;
+ return NULL;
}
#endif /* CONFIG_PCI_MSI */
+struct msi_desc *alloc_msi_entry(struct device *dev);
+void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,9 +160,6 @@ struct msi_controller {
struct device *dev;
struct device_node *of_node;
struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
@@ -221,6 +270,12 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_msi_domain_free_irqs(struct device *dev);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
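For illustration, a sketch of the new platform MSI consumer interface. The doorbell register layout (MY_MSI_*) and the my_* helpers are hypothetical; only the write-callback type, platform_msi_domain_alloc_irqs() and desc->platform.msi_index come from the header above:

	#include <linux/device.h>
	#include <linux/io.h>
	#include <linux/msi.h>

	/* Hypothetical layout: one address/data triplet per vector. */
	#define MY_MSI_ADDR_LO(n)	(0x00 + (n) * 0x10)
	#define MY_MSI_ADDR_HI(n)	(0x04 + (n) * 0x10)
	#define MY_MSI_DATA(n)		(0x08 + (n) * 0x10)

	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		void __iomem *regs = dev_get_drvdata(msi_desc_to_dev(desc));
		u16 idx = desc->platform.msi_index;

		writel(msg->address_lo, regs + MY_MSI_ADDR_LO(idx));
		writel(msg->address_hi, regs + MY_MSI_ADDR_HI(idx));
		writel(msg->data, regs + MY_MSI_DATA(idx));
	}

	static int my_enable_msis(struct device *dev, void __iomem *regs)
	{
		dev_set_drvdata(dev, regs);
		/* Allocate four vectors; the callback above programs each one. */
		return platform_msi_domain_alloc_irqs(dev, 4, my_write_msi_msg);
	}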
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 29975c73a..366cf7795 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
-#include <asm/io.h>
#include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852..049d4b03c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...) \
+ do { \
+ if (0) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#endif
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key);
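Usage of net_dbg_ratelimited() is unchanged by the hunk above; only the non-DEBUG expansion differs. A trivial sketch (my_drop_skb() is hypothetical):

	#include <linux/net.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void my_drop_skb(struct net_device *dev, struct sk_buff *skb)
	{
		/* With DEBUG unset this now expands to no_printk(): no output and
		 * no runtime cost, but the arguments are still type-checked. */
		net_dbg_ratelimited("%s: dropping skb, len=%u\n",
				    dev->name, skb->len);
		kfree_skb(skb);
	}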
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd..210d11a75 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
+ clear_bit(NAPI_STATE_NPSVC, &n->state);
}
#ifdef CONFIG_SMP
@@ -766,6 +767,13 @@ struct netdev_phys_item_id {
unsigned char id_len;
};
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+ struct netdev_phys_item_id *b)
+{
+ return a->id_len == b->id_len &&
+ memcmp(a->id, b->id, a->id_len) == 0;
+}
+
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
@@ -1041,6 +1049,16 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
 * int (*ndo_change_proto_down)(struct net_device *dev,
+ * bool proto_down);
+ * This function is used to pass protocol port error state information
+ * to the switch driver. The switch driver can react to the proto_down
+ * by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 * This function is used to get egress tunnel information for a given skb.
 * This is useful for retrieving outer tunnel header parameters while
 * sampling a packet.
+ *
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1229,10 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
+ int (*ndo_change_proto_down)(struct net_device *dev,
+ bool proto_down);
+ int (*ndo_fill_metadata_dst)(struct net_device *dev,
+ struct sk_buff *skb);
};
/**
@@ -1225,13 +1247,8 @@ struct net_device_ops {
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
* @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
* @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
* @IFF_WAN_HDLC: WAN HDLC device
* @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
* release skb->dst
@@ -1247,44 +1264,40 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is an Open vSwitch master
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
- IFF_SLAVE_INACTIVE = 1<<2,
- IFF_MASTER_8023AD = 1<<3,
- IFF_MASTER_ALB = 1<<4,
- IFF_BONDING = 1<<5,
- IFF_SLAVE_NEEDARP = 1<<6,
- IFF_ISATAP = 1<<7,
- IFF_MASTER_ARPMON = 1<<8,
- IFF_WAN_HDLC = 1<<9,
- IFF_XMIT_DST_RELEASE = 1<<10,
- IFF_DONT_BRIDGE = 1<<11,
- IFF_DISABLE_NETPOLL = 1<<12,
- IFF_MACVLAN_PORT = 1<<13,
- IFF_BRIDGE_PORT = 1<<14,
- IFF_OVS_DATAPATH = 1<<15,
- IFF_TX_SKB_SHARING = 1<<16,
- IFF_UNICAST_FLT = 1<<17,
- IFF_TEAM_PORT = 1<<18,
- IFF_SUPP_NOFCS = 1<<19,
- IFF_LIVE_ADDR_CHANGE = 1<<20,
- IFF_MACVLAN = 1<<21,
- IFF_XMIT_DST_RELEASE_PERM = 1<<22,
- IFF_IPVLAN_MASTER = 1<<23,
- IFF_IPVLAN_SLAVE = 1<<24,
+ IFF_BONDING = 1<<2,
+ IFF_ISATAP = 1<<3,
+ IFF_WAN_HDLC = 1<<4,
+ IFF_XMIT_DST_RELEASE = 1<<5,
+ IFF_DONT_BRIDGE = 1<<6,
+ IFF_DISABLE_NETPOLL = 1<<7,
+ IFF_MACVLAN_PORT = 1<<8,
+ IFF_BRIDGE_PORT = 1<<9,
+ IFF_OVS_DATAPATH = 1<<10,
+ IFF_TX_SKB_SHARING = 1<<11,
+ IFF_UNICAST_FLT = 1<<12,
+ IFF_TEAM_PORT = 1<<13,
+ IFF_SUPP_NOFCS = 1<<14,
+ IFF_LIVE_ADDR_CHANGE = 1<<15,
+ IFF_MACVLAN = 1<<16,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<17,
+ IFF_IPVLAN_MASTER = 1<<18,
+ IFF_IPVLAN_SLAVE = 1<<19,
+ IFF_VRF_MASTER = 1<<20,
+ IFF_NO_QUEUE = 1<<21,
+ IFF_OPENVSWITCH = 1<<22,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD IFF_MASTER_8023AD
-#define IFF_MASTER_ALB IFF_MASTER_ALB
#define IFF_BONDING IFF_BONDING
-#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
#define IFF_ISATAP IFF_ISATAP
-#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
#define IFF_WAN_HDLC IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1301,6 +1314,9 @@ enum netdev_priv_flags {
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER IFF_VRF_MASTER
+#define IFF_NO_QUEUE IFF_NO_QUEUE
+#define IFF_OPENVSWITCH IFF_OPENVSWITCH
/**
* struct net_device - The DEVICE structure.
@@ -1417,6 +1433,7 @@ enum netdev_priv_flags {
* @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
+ * @vrf_ptr: VRF specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
* @last_rx: Time of last Rx
@@ -1448,6 +1465,8 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
+ * @offload_fwd_mark: Offload device fwding mark
+ *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1502,6 +1521,10 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+ * switch port.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1629,6 +1652,7 @@ struct net_device {
struct dn_dev __rcu *dn_ptr;
struct inet6_dev __rcu *ip6_ptr;
void *ax25_ptr;
+ struct net_vrf_dev __rcu *vrf_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1709,10 @@ struct net_device {
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ u32 offload_fwd_mark;
+#endif
+
/* These may be needed for future network-power-down code. */
/*
@@ -1762,6 +1790,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2093,6 +2122,13 @@ struct netdev_notifier_change_info {
unsigned int flags_changed;
};
+struct netdev_notifier_changeupper_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct net_device *upper_dev; /* new upper dev */
+ bool master; /* is upper dev master */
+ bool linking; /* is the notification for link or unlink */
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2173,6 +2209,7 @@ void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2277,8 +2314,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
- skb_gro_offset(skb));
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2410,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
grc->delta = 0;
}
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
{
__wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start =
- ((unsigned char *)ptr + start) - skb->head;
- return;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
}
- delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
/* Adjust skb->csum since we changed the packet */
NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
- grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->offset = off + hdrlen + offset;
grc->delta = delta;
+
+ return ptr;
}
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
struct gro_remcsum *grc)
{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
if (!grc->delta)
return;
- remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3039,7 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3781,6 +3839,42 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+ bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+ struct net_device *dev;
+
+ if (ifindex == 0)
+ return false;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = netif_is_vrf(dev);
+
+ rcu_read_unlock();
+#endif
+ return rc;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
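A hedged sketch of how a switch driver could hook up the new proto_down callback documented above; netif_carrier_on()/netif_carrier_off() stand in for a real driver's port-state programming, and my_* names are hypothetical:

	#include <linux/netdevice.h>

	static int my_change_proto_down(struct net_device *dev, bool proto_down)
	{
		/* Mirror a protocol-detected error state onto the phys state of
		 * the associated switch port. */
		if (proto_down)
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
		return 0;
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_change_proto_down	= my_change_proto_down,
	};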
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9..36a652531 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
};
/* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n);
+
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
u_int8_t pf, unsigned int hook)
{
- return !list_empty(nf_hook_list);
+ return !list_empty(hook_list);
}
#endif
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
- return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct sock *, struct sk_buff *),
int thresh)
{
- if (nf_hooks_active(pf, hook)) {
+ struct net *net = dev_net(indev ? indev : outdev);
+ struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+ if (nf_hook_list_active(hook_list, pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ nf_hook_state_init(&state, hook_list, hook, thresh,
pf, indev, outdev, sk, okfn);
return nf_hook_slow(skb, &state);
}
@@ -363,6 +368,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When a xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
#endif /*__LINUX_NETFILTER_H*/
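A sketch of the per-namespace registration API added above, registered from pernet init/exit callbacks. The my_* names are hypothetical; the hook function is only declared through the existing nf_hookfn typedef, so its signature stays whatever this tree defines, and its body is elided:

	#include <linux/netfilter.h>
	#include <linux/netfilter_ipv4.h>
	#include <net/net_namespace.h>

	static nf_hookfn my_hook_fn;		/* definition elided */

	static struct nf_hook_ops my_hook_ops = {
		.hook		= my_hook_fn,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_FIRST,
	};

	static int __net_init my_net_init(struct net *net)
	{
		/* Hooks now live on per-namespace lists: register in @net only. */
		return nf_register_net_hook(net, &my_hook_ops);
	}

	static void __net_exit my_net_exit(struct net *net)
	{
		nf_unregister_net_hook(net, &my_hook_ops);
	}

	static struct pernet_operations my_net_ops = {
		.init	= my_net_init,
		.exit	= my_net_exit,
	};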
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000..5d7cf36d4
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID 0
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK 1
+
+struct nf_conntrack_zone {
+ u16 id;
+ u8 flags;
+ u8 dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748..80ca889b1 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
#define _NFNL_ACCT_H_
#include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
enum {
NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
struct nf_acct;
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a56..b006b7191 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
+#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -222,7 +223,6 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int __percpu *stackptr;
void ***jumpstack;
unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
/**
* xt_write_recseq_begin - start of a write section
*
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686..2437b8a5d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_NF_BRIDGE_PREROUTING 0x08
-
int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+ return false;
+}
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f3a..771574677 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
#include <uapi/linux/netfilter_ipv6.h>
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -30,6 +21,14 @@ struct nf_ipv6_ops {
int (*output)(struct sock *, struct sk_buff *));
};
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
{
@@ -39,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9120edb65..639e9b8b0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+ gfp_t gfp_mask)
+{
+ return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8e72aad9..00121f298 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -547,6 +547,24 @@ enum pnfs_notify_deviceid_type4 {
NOTIFY_DEVICEID4_DELETE = 1 << 2,
};
+enum pnfs_block_volume_type {
+ PNFS_BLOCK_VOLUME_SIMPLE = 0,
+ PNFS_BLOCK_VOLUME_SLICE = 1,
+ PNFS_BLOCK_VOLUME_CONCAT = 2,
+ PNFS_BLOCK_VOLUME_STRIPE = 3,
+};
+
+enum pnfs_block_extent_state {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2,
+ PNFS_BLOCK_NONE_DATA = 3,
+};
+
+/* on the wire size of a block layout extent */
+#define PNFS_BLOCK_EXTENT_SIZE \
+ (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+
#define NFL4_UFLG_MASK 0x0000003F
#define NFL4_UFLG_DENSE 0x00000001
#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 874b77228..c0e961474 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -353,7 +353,6 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -371,6 +370,7 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
+extern void nfs_file_clear_open_context(struct file *flip);
extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
extern u64 nfs_compat_user_ino64(u64 fileid);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20bc8e51b..570a7df27 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -173,6 +173,11 @@ struct nfs_server {
set of attributes supported
on this filesystem excluding
the label support bit. */
+ u32 exclcreat_bitmask[3];
+ /* V4 bitmask representing the
+ set of attributes supported
+ on this filesystem for the
+ exclusive create. */
u32 cache_consistency_bitmask[3];
/* V4 bitmask representing the subset
of change attribute, size, ctime
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7bbe50504..52faf7e96 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,7 +379,7 @@ struct nfs_openargs {
struct stateowner_id id;
union {
struct {
- struct iattr * attrs; /* UNCHECKED, GUARDED */
+ struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
nfs4_verifier verifier; /* EXCLUSIVE */
};
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
@@ -389,7 +389,7 @@ struct nfs_openargs {
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
const u32 * open_bitmap;
- __u32 claim;
+ enum open_claim_type4 claim;
enum createmode4 createmode;
const struct nfs4_label *label;
};
@@ -406,8 +406,8 @@ struct nfs_openres {
const struct nfs_server *server;
fmode_t delegation_type;
nfs4_stateid delegation;
+ unsigned long pagemod_limit;
__u32 do_recall;
- __u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
@@ -1057,11 +1057,13 @@ struct nfs4_statfs_res {
struct nfs4_server_caps_arg {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fhandle;
+ const u32 * bitmask;
};
struct nfs4_server_caps_res {
struct nfs4_sequence_res seq_res;
u32 attr_bitmask[3];
+ u32 exclcreat_bitmask[3];
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e65..78488e099 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@ static inline void touch_nmi_watchdog(void)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
-static inline void hardlockup_detector_disable(void)
-{
-}
+static inline void hardlockup_detector_disable(void) {}
#endif
/*
@@ -49,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
arch_trigger_all_cpu_backtrace(false);
return true;
}
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
#else
static inline bool trigger_all_cpu_backtrace(void)
{
@@ -80,6 +84,17 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+ return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
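For illustration, how an architecture might back its backtrace trigger with the generic helper declared above. my_send_backtrace_nmi() is hypothetical (whatever NMI/IPI the architecture uses to interrupt the CPUs in @mask), and the generic code is assumed to handle mask setup and timeouts:

	#include <linux/nmi.h>
	#include <linux/cpumask.h>
	#include <asm/irq_regs.h>

	static void my_raise_backtrace(cpumask_t *mask)
	{
		my_send_backtrace_nmi(mask);
	}

	void arch_trigger_all_cpu_backtrace(bool include_self)
	{
		nmi_trigger_all_cpu_backtrace(include_self, my_raise_backtrace);
	}

	/* In the corresponding NMI/IPI handler on each targeted CPU: */
	static void my_backtrace_handler(void)
	{
		nmi_cpu_backtrace(get_irq_regs());
	}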
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index b02f72bb8..f798e2afb 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -522,10 +522,9 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
* @speed: OUT - The link speed expressed as PCIe generation number.
* @width: OUT - The link width expressed as the number of PCIe lanes.
*
- * Set the translation of a memory window. The peer may access local memory
- * through the window starting at the address, up to the size. The address
- * must be aligned to the alignment specified by ntb_mw_get_range(). The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Get the current state of the ntb link. It is recommended to query the link
+ * state once after every link event. It is safe to query the link state in
+ * the context of the link event callback.
*
* Return: One if the link is up, zero if the link is down, otherwise a
* negative value indicating the error number.
@@ -795,7 +794,7 @@ static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
}
/**
- * ntb_peer_db_clear() - clear bits in the local doorbell register
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
* @ntb: NTB device context.
* @db_bits: Doorbell bits to clear.
*
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 286286136..7243eb98a 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -83,3 +83,4 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
void ntb_transport_link_up(struct ntb_transport_qp *qp);
void ntb_transport_link_down(struct ntb_transport_qp *qp);
bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed8c..b5812c395 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@ struct nvme_bar {
__u32 cc; /* Controller Configuration */
__u32 rsvd1; /* Reserved */
__u32 csts; /* Controller Status */
- __u32 rsvd2; /* Reserved */
+ __u32 nssr; /* Subsystem Reset */
__u32 aqa; /* Admin Queue Attributes */
__u64 asq; /* Admin SQ Base Address */
__u64 acq; /* Admin CQ Base Address */
+ __u32 cmbloc; /* Controller Memory Buffer Location */
+ __u32 cmbsz; /* Controller Memory Buffer Size */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
@@ -55,6 +69,7 @@ enum {
NVME_CC_IOCQES = 4 << 20,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
+ NVME_CSTS_NSSRO = 1 << 4,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -97,9 +112,14 @@ struct nvme_dev {
char serial[20];
char model[40];
char firmware_rev[8];
+ bool subsystem;
u32 max_hw_sectors;
u32 stripe_size;
u32 page_size;
+ void __iomem *cmb;
+ dma_addr_t cmb_dma_addr;
+ u64 cmb_size;
+ u32 cmbsz;
u16 oncs;
u16 abort_limit;
u8 event_limit;
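An illustrative decode of the new Controller Memory Buffer fields using the macros above. The 4 KiB * 16^SZU unit scaling follows the NVMe specification and is an assumption here, not something the header itself states:

	#include <linux/nvme.h>
	#include <linux/types.h>

	static u64 my_cmb_size_bytes(u32 cmbsz)
	{
		if (!NVME_CMB_SZ(cmbsz))
			return 0;	/* no controller memory buffer present */

		/* SZU selects the granularity: 0 -> 4 KiB, 1 -> 64 KiB, ... */
		return (u64)NVME_CMB_SZ(cmbsz) << (12 + 4 * NVME_CMB_SZU(cmbsz));
	}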
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
new file mode 100644
index 000000000..9bb77d3ed
--- /dev/null
+++ b/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
+/*
+ * nvmem framework consumer.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_CONSUMER_H
+#define _LINUX_NVMEM_CONSUMER_H
+
+struct device;
+struct device_node;
+/* consumer cookie */
+struct nvmem_cell;
+struct nvmem_device;
+
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+/* Cell based interface */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+
+/* direct nvmem device read/write interface */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+ size_t bytes, void *buf);
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+
+#else
+
+static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_nvmem_cell_put(struct device *dev,
+ struct nvmem_cell *cell)
+{
+
+}
+static inline void nvmem_cell_put(struct nvmem_cell *cell)
+{
+}
+
+static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_cell_write(struct nvmem_cell *cell,
+ const char *buf, size_t len)
+{
+ return -ENOSYS;
+}
+
+static inline struct nvmem_device *nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void nvmem_device_put(struct nvmem_device *nvmem)
+{
+}
+
+static inline void devm_nvmem_device_put(struct device *dev,
+ struct nvmem_device *nvmem)
+{
+}
+
+static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_read(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+static inline int nvmem_device_write(struct nvmem_device *nvmem,
+ unsigned int offset, size_t bytes,
+ void *buf)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name);
+struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name);
+#else
+static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
+#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
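A minimal consumer-side sketch built only on the cell API declared above. The cell name "mac-address" is hypothetical, and the buffer returned by nvmem_cell_read() is assumed to be caller-owned (freed with kfree() after use):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int my_read_mac(struct device *dev, u8 mac[6])
	{
		struct nvmem_cell *cell;
		void *data;
		size_t len;
		int ret = 0;

		cell = nvmem_cell_get(dev, "mac-address");
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		data = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(data))
			return PTR_ERR(data);

		if (len >= 6)
			memcpy(mac, data, 6);
		else
			ret = -EINVAL;

		kfree(data);
		return ret;
	}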
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
new file mode 100644
index 000000000..0b68caff1
--- /dev/null
+++ b/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
+/*
+ * nvmem framework provider.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_PROVIDER_H
+#define _LINUX_NVMEM_PROVIDER_H
+
+struct nvmem_device;
+struct nvmem_cell_info;
+
+struct nvmem_config {
+ struct device *dev;
+ const char *name;
+ int id;
+ struct module *owner;
+ const struct nvmem_cell_info *cells;
+ int ncells;
+ bool read_only;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
+int nvmem_unregister(struct nvmem_device *nvmem);
+
+#else
+
+static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_unregister(struct nvmem_device *nvmem)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_NVMEM */
+
+#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
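Taken together with the consumer API above, the new nvmem-provider.h supplies the other half of the framework: a driver fills in struct nvmem_config and calls nvmem_register(), and nvmem_unregister() removes the device again. A minimal provider sketch, assuming a hypothetical EEPROM-like driver whose struct device has already been probed (all names here are illustrative, not taken from the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>

static struct nvmem_device *demo_nvmem;

static int demo_nvmem_setup(struct device *dev)
{
        struct nvmem_config config = {
                .dev = dev,             /* parent device backing the storage */
                .name = "demo-nvmem",   /* assumed name, purely illustrative */
                .owner = THIS_MODULE,
                .cells = NULL,          /* no statically described cells */
                .ncells = 0,
                .read_only = true,      /* expose the contents read-only */
        };

        demo_nvmem = nvmem_register(&config);
        if (IS_ERR(demo_nvmem))
                return PTR_ERR(demo_nvmem);

        return 0;
}

static void demo_nvmem_teardown(void)
{
        nvmem_unregister(demo_nvmem);
}

When CONFIG_NVMEM is disabled, the stubs above turn nvmem_register() into ERR_PTR(-ENOSYS), so such a caller degrades gracefully without any #ifdefs of its own.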
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d19..2194b8ca4 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+ return is_of_node(fwnode) ?
+ container_of(fwnode, struct device_node, fwnode) : NULL;
}
static inline bool of_have_populated_dt(void)
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe312b..f3191828f 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@ extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
return -ENOSYS;
}
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
#endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d884929a7..4bcbd586a 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -74,6 +74,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 611a69114..956a1006a 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,9 @@ extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent);
+extern int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent);
extern void of_platform_depopulate(struct device *parent);
#else
static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@ static inline int of_platform_populate(struct device_node *root,
{
return -ENODEV;
}
+static inline int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ return -ENODEV;
+}
static inline void of_platform_depopulate(struct device *parent) { }
#endif
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index c2bbf672b..d2fa9ca42 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -41,7 +41,7 @@ enum OID {
OID_signed_data, /* 1.2.840.113549.1.7.2 */
/* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
OID_email_address, /* 1.2.840.113549.1.9.1 */
- OID_content_type, /* 1.2.840.113549.1.9.3 */
+ OID_contentType, /* 1.2.840.113549.1.9.3 */
OID_messageDigest, /* 1.2.840.113549.1.9.4 */
OID_signingTime, /* 1.2.840.113549.1.9.5 */
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@ enum OID {
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
+ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
+ OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */
OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@ enum OID {
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
+ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
+ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
+ OID_sha224, /* 2.16.840.1.101.3.4.2.4 */
/* Distinguished Name attribute IDs [RFC 2256] */
OID_commonName, /* 2.5.4.3 */
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index e5a70132a..88fa8af2b 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
-#define INT_DMA_LCD 25
+#define INT_DMA_LCD (NR_IRQS_LEGACY + 25)
#define OMAP1_DMA_TOUT_IRQ (1 << 0)
#define OMAP_DMA_DROP_IRQ (1 << 1)
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7deecb7bc..03e625732 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -13,6 +13,27 @@ struct mem_cgroup;
struct task_struct;
/*
+ * Details of the page allocation that triggered the oom killer that are used to
+ * determine what should be killed.
+ */
+struct oom_control {
+ /* Used to determine cpuset */
+ struct zonelist *zonelist;
+
+ /* Used to determine mempolicy */
+ nodemask_t *nodemask;
+
+ /* Used to determine cpuset and node locality requirement */
+ const gfp_t gfp_mask;
+
+ /*
+ * order == -1 means the oom kill is required by sysrq, otherwise only
+ * for display purposes.
+ */
+ const int order;
+};
+
+/*
* Types of limitations to the nodes from which allocations may occur
*/
enum oom_constraint {
@@ -57,21 +78,18 @@ extern unsigned long oom_badness(struct task_struct *p,
extern int oom_kills_count(void);
extern void note_oom_kill(void);
-extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
unsigned int points, unsigned long totalpages,
- struct mem_cgroup *memcg, nodemask_t *nodemask,
- const char *message);
+ struct mem_cgroup *memcg, const char *message);
-extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask,
+extern void check_panic_on_oom(struct oom_control *oc,
+ enum oom_constraint constraint,
struct mem_cgroup *memcg);
-extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
- unsigned long totalpages, const nodemask_t *nodemask,
- bool force_kill);
+extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+ struct task_struct *task, unsigned long totalpages);
-extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
- int order, nodemask_t *mask, bool force_kill);
+extern bool out_of_memory(struct oom_control *oc);
extern void exit_oom_victim(void);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index bf858b220..df5a154ee 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -115,6 +115,10 @@ enum pageflags {
PG_toi_cbw, /* Copy the page before it is written to */
PG_toi_dirty, /* Page has been modified */
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+ PG_young,
+ PG_idle,
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -306,6 +310,13 @@ PAGEFLAG_FALSE(TOI_Untracked)
PAGEFLAG_FALSE(TOI_CBW)
#endif
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+TESTPAGEFLAG(Young, young)
+SETPAGEFLAG(Young, young)
+TESTCLEARFLAG(Young, young)
+PAGEFLAG(Idle, idle)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 2dc1e1697..047d64706 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
-/*
- * Internal functions. Changes pageblock's migrate type.
- */
-int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
-void unset_migratetype_isolate(struct page *page, unsigned migratetype);
struct page *alloc_migrate_target(struct page *page, unsigned long private,
int **resultp);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index c42981cd9..17f118a82 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -26,6 +26,10 @@ enum page_ext_flags {
PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_YOUNG,
+ PAGE_EXT_IDLE,
+#endif
};
/*
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
new file mode 100644
index 000000000..bf268fa92
--- /dev/null
+++ b/include/linux/page_idle.h
@@ -0,0 +1,110 @@
+#ifndef _LINUX_MM_PAGE_IDLE_H
+#define _LINUX_MM_PAGE_IDLE_H
+
+#include <linux/bitops.h>
+#include <linux/page-flags.h>
+#include <linux/page_ext.h>
+
+#ifdef CONFIG_IDLE_PAGE_TRACKING
+
+#ifdef CONFIG_64BIT
+static inline bool page_is_young(struct page *page)
+{
+ return PageYoung(page);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ SetPageYoung(page);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return TestClearPageYoung(page);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return PageIdle(page);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ SetPageIdle(page);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ ClearPageIdle(page);
+}
+#else /* !CONFIG_64BIT */
+/*
+ * If there is not enough space to store Idle and Young bits in page flags, use
+ * page ext flags instead.
+ */
+extern struct page_ext_operations page_idle_ops;
+
+static inline bool page_is_young(struct page *page)
+{
+ return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_young(struct page *page)
+{
+ set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return test_and_clear_bit(PAGE_EXT_YOUNG,
+ &lookup_page_ext(page)->flags);
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void set_page_idle(struct page *page)
+{
+ set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+ clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+}
+#endif /* CONFIG_64BIT */
+
+#else /* !CONFIG_IDLE_PAGE_TRACKING */
+
+static inline bool page_is_young(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_young(struct page *page)
+{
+}
+
+static inline bool test_and_clear_page_young(struct page *page)
+{
+ return false;
+}
+
+static inline bool page_is_idle(struct page *page)
+{
+ return false;
+}
+
+static inline void set_page_idle(struct page *page)
+{
+}
+
+static inline void clear_page_idle(struct page *page)
+{
+}
+
+#endif /* CONFIG_IDLE_PAGE_TRACKING */
+
+#endif /* _LINUX_MM_PAGE_IDLE_H */
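page_idle.h gives one accessor set for the Young/Idle hints whether they live in page flags (64-bit) or in page_ext (32-bit), and compiles away entirely without CONFIG_IDLE_PAGE_TRACKING. A hedged sketch of the typical consumer pattern, for hypothetical code that has just observed an access to a page it holds a reference on:

#include <linux/mm_types.h>
#include <linux/page_idle.h>

/* Illustrative only: record an access so an idle-tracking scan sees it. */
static void demo_note_page_access(struct page *page)
{
        set_page_young(page);           /* remember the access for the next scan */
        if (page_is_idle(page))
                clear_page_idle(page);  /* an accessed page is no longer idle */
}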
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 72031785f..57e0b8250 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
#include <linux/pci.h>
-/* Address Translation Service */
-struct pci_ats {
- int pos; /* capability position */
- int stu; /* Smallest Translation Unit */
- int qdep; /* Invalidate Queue Depth */
- int ref_cnt; /* Physical Function reference count */
- unsigned int is_enabled:1; /* Enable bit is set */
-};
-
-#ifdef CONFIG_PCI_ATS
-
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return dev->ats && dev->ats->is_enabled;
-}
-
-#else /* CONFIG_PCI_ATS */
-
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
- return -ENODEV;
-}
-
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
- return 0;
-}
-
-#endif /* CONFIG_PCI_ATS */
-
#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1d4eb6057..e90eb22de 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -345,6 +345,7 @@ struct pci_dev {
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Service */
unsigned int is_managed:1;
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
@@ -368,7 +369,6 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
#ifdef CONFIG_PCI_MSI
- struct list_head msi_list;
const struct attribute_group **msi_irq_groups;
#endif
struct pci_vpd *vpd;
@@ -377,7 +377,9 @@ struct pci_dev {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
- struct pci_ats *ats; /* Address Translation Service */
+ u16 ats_cap; /* ATS Capability offset */
+ u8 ats_stu; /* ATS Smallest Translation Unit */
+ atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
#endif
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -448,7 +450,8 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
- struct list_head slots; /* list of slots on this bus */
+ struct list_head slots; /* list of slots on this bus;
+ protected by pci_slot_mutex */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
struct resource busn_res; /* bus numbers routed to this bus */
@@ -740,10 +743,11 @@ struct pci_driver {
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
- PCIE_BUS_TUNE_OFF,
- PCIE_BUS_SAFE,
- PCIE_BUS_PERFORMANCE,
- PCIE_BUS_PEER2PEER,
+ PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
+ PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
+ PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
+ PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
+ PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
};
extern enum pcie_bus_config_types pcie_bus_config;
@@ -789,6 +793,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources,
+ struct msi_controller *msi);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
@@ -799,6 +807,11 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
+#ifdef CONFIG_SYSFS
+void pci_dev_assign_slot(struct pci_dev *dev);
+#else
+static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
+#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -965,6 +978,23 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
+static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
+{
+ pdev->irq = irq;
+ pdev->irq_managed = 1;
+}
+
+static inline void pci_reset_managed_irq(struct pci_dev *pdev)
+{
+ pdev->irq = 0;
+ pdev->irq_managed = 0;
+}
+
+static inline bool pci_has_managed_irq(struct pci_dev *pdev)
+{
+ return pdev->irq_managed && pdev->irq > 0;
+}
+
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
@@ -1197,6 +1227,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
dma_pool_create(name, &pdev->dev, size, align, allocation)
#define pci_pool_destroy(pool) dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_zalloc(pool, flags, handle) \
+ dma_pool_zalloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
struct msix_entry {
@@ -1297,6 +1329,19 @@ int ht_create_irq(struct pci_dev *dev, int idx);
void ht_destroy_irq(unsigned int irq);
#endif /* CONFIG_HT_IRQ */
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1648,6 +1693,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
+int pcibios_alloc_irq(struct pci_dev *dev);
+void pcibios_free_irq(struct pci_dev *dev);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1664,6 +1711,7 @@ static inline void pci_mmcfg_late_init(void) { }
int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1845,10 +1893,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
+struct irq_domain;
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1871,6 +1921,8 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fcff8f865..d9ba49ced 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2332,6 +2332,15 @@
#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define PCI_VENDOR_ID_TECHWELL 0x1797
+#define PCI_DEVICE_ID_TECHWELL_6800 0x6800
+#define PCI_DEVICE_ID_TECHWELL_6801 0x6801
+#define PCI_DEVICE_ID_TECHWELL_6804 0x6804
+#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810
+#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811
+#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812
+#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57f3a1c55..8f16299ca 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@ do { \
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
/*
- * Operations with implied preemption protection. These operations can be
- * used without worrying about preemption. Note that interrupts may still
- * occur while an operation is in progress and if the interrupt modifies
- * the variable too then RMW actions may not be reliable.
+ * Operations with implied preemption/interrupt protection. These
+ * operations can be used without worrying about preemption or interrupt.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3e88c9a7d..834c4e52c 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -16,6 +16,7 @@ struct percpu_rw_semaphore {
};
extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
extern void percpu_up_read(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +32,23 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(brw, #brw, &rwsem_key); \
})
+
+#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+
+static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_release(&sem->rw_sem.dep_map, 1, ip);
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ if (!read)
+ sem->rw_sem.owner = NULL;
+#endif
+}
+
+static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
+ bool read, unsigned long ip)
+{
+ lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+}
+
#endif
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000..bfa673bb8
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ */
+struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+ }, \
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+ /*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+ raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
+};
+
+struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+ cpumask_t supported_cpus;
+ int *irq_affinity;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
+ int (*set_event_filter)(struct hw_perf_event *evt,
+ struct perf_event_attr *attr);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ u64 max_period;
+ struct platform_device *plat_device;
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
+
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
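The new pmu_probe_info table and the PMU_PROBE()/ARM_PMU_PROBE() helpers let a platform driver hand arm_pmu_device_probe() a CPUID-keyed list of init functions. A rough sketch of how a driver might build such a table; the CPU part constant comes from asm/cputype.h and the init callback is a hypothetical placeholder:

#include <linux/perf/arm_pmu.h>

static int demo_a9_pmu_init(struct arm_pmu *pmu)
{
        /* fill in pmu->name, pmu->map_event, counter accessors, ... */
        return 0;
}

static const struct pmu_probe_info demo_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, demo_a9_pmu_init),
        { /* sentinel */ },
};

/* later, from the platform driver's probe() (demo_of_match is hypothetical):
 *      return arm_pmu_device_probe(pdev, demo_of_match, demo_pmu_probe_table);
 */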
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 202780943..092a0e8a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
+static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b..4a4e3a092 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
+#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
* PHYs should register using this structure
*/
struct mii_bus {
+ struct module *owner;
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
return mdiobus_alloc_size(0);
}
-int mdiobus_register(struct mii_bus *bus);
+int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -330,6 +333,7 @@ struct phy_c45_device_ids {
* c45_ids: 802.3-c45 Device Identifers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
@@ -368,6 +372,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
@@ -424,6 +429,8 @@ struct phy_device {
struct net_device *attached_dev;
+ u8 mdix;
+
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +693,16 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/*
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+ return phydev->is_pseudo_fixed_link;
}
/**
@@ -728,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
struct phy_c45_device_ids *c45_ids);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
+void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d53..2400d2ea4 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
+ struct fixed_phy_status *status,
+ int link_gpio);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int link_gpio,
struct device_node *np);
extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
+ struct fixed_phy_status *status,
+ int link_gpio)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 4b452c6a2..527a85c61 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -46,18 +46,6 @@ struct at91_cf_data {
#define AT91_IDE_SWAP_A0_A2 0x02
};
- /* USB Host */
-#define AT91_MAX_USBH_PORTS 3
-struct at91_usbh_data {
- int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
- int overcurrent_pin[AT91_MAX_USBH_PORTS];
- u8 ports; /* number of ports on root hub */
- u8 overcurrent_supported;
- u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
- u8 overcurrent_status[AT91_MAX_USBH_PORTS];
- u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
-};
-
/* NAND / SmartMedia */
struct atmel_nand_data {
int enable_pin; /* chip enable */
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
index 02bf6ea31..695035a8d 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -10,16 +10,22 @@
* option) any later version.
*/
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
#include <linux/types.h>
+enum mxt_suspend_mode {
+ MXT_SUSPEND_DEEP_SLEEP = 0,
+ MXT_SUSPEND_T9_CTRL = 1,
+};
+
/* The platform data for the Atmel maXTouch touchscreen driver */
struct mxt_platform_data {
unsigned long irqflags;
u8 t19_num_keys;
const unsigned int *t19_keymap;
+ enum mxt_suspend_mode suspend_mode;
};
-#endif /* __LINUX_ATMEL_MXT_TS_H */
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 97baf831e..3af0da1f3 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
#ifndef __CLK_UX500_H
#define __CLK_UX500_H
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base);
+void u8500_clk_init(void);
+void u9540_clk_init(void);
+void u8540_clk_init(void);
#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519d2..000000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
- unsigned int gpio_base;
- unsigned int irq_base;
- unsigned int number_of_pins;
- const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 000000000..c68712aad
--- /dev/null
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
+
+/**
+ * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of values, one per channel
+ * @n_values: Number of multiplexer channels
+ * @little_endian: Indicates whether the register is little-endian
+ * @write_only: Reading the register is not allowed by hardware
+ * @classes: Optional I2C auto-detection classes
+ * @idle: Value to write to mux when idle
+ * @idle_in_use: Indicates whether the idle value is in use
+ * @reg: Virtual address of the register to switch channel
+ * @reg_size: Register size in bytes
+ */
+struct i2c_mux_reg_platform_data {
+ int parent;
+ int base_nr;
+ const unsigned int *values;
+ int n_values;
+ bool little_endian;
+ bool write_only;
+ const unsigned int *classes;
+ u32 idle;
+ bool idle_in_use;
+ void __iomem *reg;
+ resource_size_t reg_size;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
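For boards that still describe this mux through platform data rather than DT, the structure documented above is filled in and attached to the mux platform device. A sketch under the assumption that the platform device name is "i2c-mux-reg" and that .reg/.reg_size are filled in once the mux register has been ioremapped (all values below are made up for illustration):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>

/* Board-specific channel-select values written to the mux register. */
static const unsigned int demo_mux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_reg_platform_data demo_mux_pdata = {
        .parent = 0,                    /* adapter number of the upstream bus */
        .base_nr = 0,                   /* 0 = let the core number child buses */
        .values = demo_mux_values,
        .n_values = ARRAY_SIZE(demo_mux_values),
        .little_endian = true,
        .write_only = false,
        .idle = 0,
        .idle_in_use = false,
        /* .reg and .reg_size would be filled in (after ioremap()) before registering */
};

static int demo_mux_register(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_data(NULL, "i2c-mux-reg", -1,
                                             &demo_mux_pdata,
                                             sizeof(demo_mux_pdata));
        return PTR_ERR_OR_ZERO(pdev);
}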
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 000000000..f16542c77
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS_PMC 0
+
+struct itco_wdt_platform_data {
+ char name[32];
+ unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed57f..eb8a6860e 100644
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
#ifndef __LEDS_KIRKWOOD_NS2_H
#define __LEDS_KIRKWOOD_NS2_H
+enum ns2_led_modes {
+ NS_V2_LED_OFF,
+ NS_V2_LED_ON,
+ NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+ enum ns2_led_modes mode;
+ int cmd_level;
+ int slow_level;
+};
+
struct ns2_led {
const char *name;
const char *default_trigger;
unsigned cmd;
unsigned slow;
+ int num_modes;
+ struct ns2_led_modval *modval;
};
struct ns2_led_platform_data {
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1efe..1b2ba24e4 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@ struct lp855x_rom_data {
Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
*/
struct lp855x_platform_data {
const char *name;
@@ -145,7 +144,6 @@ struct lp855x_platform_data {
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
- struct regulator *supply;
};
#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index e1571efa3..95ccab3f4 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -45,5 +45,6 @@ struct esdhc_platform_data {
int max_bus_width;
bool support_vsel;
unsigned int delay_line;
+ unsigned int tuning_step; /* The delay cell steps in tuning procedure */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
index 7bae83b7c..646af6f8b 100644
--- a/include/linux/input/pixcir_ts.h
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -57,7 +57,6 @@ struct pixcir_i2c_chip_data {
struct pixcir_ts_platform_data {
int x_max;
int y_max;
- int gpio_attb; /* GPIO connected to ATTB line */
struct pixcir_i2c_chip_data chip;
};
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index 8dc2fa47a..f4edcb03c 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@ struct davinci_spi_platform_data {
u8 num_chipselect;
u8 intr_line;
u8 *chip_sel;
+ u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
};
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 000000000..54b044839
--- /dev/null
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
+/*
+ * MTK SPI bus driver definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
+#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
+
+/* Board specific platform_data */
+struct mtk_chip_config {
+ u32 tx_mlsb;
+ u32 rx_mlsb;
+};
+#endif
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 92fc2b223..699ac4109 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
#define __VIDEO_EP93XX_H
struct platform_device;
-struct fb_videomode;
struct fb_info;
-#define EP93XXFB_USE_MODEDB 0
-
/* VideoAttributes flags */
#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
@@ -38,12 +35,7 @@ struct fb_info;
EP93XXFB_PIXEL_DATA_ENABLE)
struct ep93xxfb_mach_info {
- unsigned int num_modes;
- const struct fb_videomode *modes;
- const struct fb_videomode *default_mode;
- int bpp;
unsigned int flags;
-
int (*setup)(struct platform_device *pdev);
void (*teardown)(struct platform_device *pdev);
void (*blank)(int blank_mode, struct fb_info *info);
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2f6..7bdece8ef 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
#define _LINUX_INPUT_ZFORCE_TS_H
struct zforce_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
unsigned int x_max;
unsigned int y_max;
};
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb053..b1cf7e797 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
- GPD_STATE_BUSY, /* Something is happening to the PM domain */
- GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
};
@@ -59,9 +56,6 @@ struct generic_pm_domain {
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
- wait_queue_head_t status_wait_queue;
- struct task_struct *poweroff_task; /* Powering off task */
- unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
- int need_restore;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
return -ENOSYS;
}
static inline void pm_genpd_poweroff_unused(void) {}
-#define simple_qos_governor NULL
-#define pm_domain_always_on_gov NULL
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540..e817722ee 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,11 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -62,11 +66,26 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ return false;
+}
+
static inline int dev_pm_opp_get_opp_count(struct device *dev)
{
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ return NULL;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
@@ -115,6 +134,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int of_init_opp_table(struct device *dev);
void of_free_opp_table(struct device *dev);
+int of_cpumask_init_opp_table(cpumask_var_t cpumask);
+void of_cpumask_free_opp_table(cpumask_var_t cpumask);
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
#else
static inline int of_init_opp_table(struct device *dev)
{
@@ -124,6 +147,25 @@ static inline int of_init_opp_table(struct device *dev)
static inline void of_free_opp_table(struct device *dev)
{
}
+
+static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
+
+static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ return -ENOSYS;
+}
#endif
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0cff..0f65d36c2 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+int dev_pm_qos_expose_latency_tolerance(struct device *dev);
+void dev_pm_qos_hide_latency_tolerance(struct device *dev);
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{ return 0; }
+static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+ { return 0; }
+static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d48b..3bdbb4189 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
return dev->power.runtime_status == RPM_SUSPENDED;
}
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
-{
- return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
-}
-
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a..85f810b33 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
#define __PMEM_H__
#include <linux/io.h>
+#include <linux/uio.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
-#include <asm/cacheflush.h>
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
+{
+ return false;
+}
+
static inline void arch_wmb_pmem(void)
{
BUG();
}
-static inline bool __arch_has_wmb_pmem(void)
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t n)
{
- return false;
+ BUG();
}
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
{
- return NULL;
+ BUG();
+ return 0;
}
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
- size_t n)
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
BUG();
}
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
/*
* Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
- * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
*/
-
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
memcpy(dst, (void __force const *) src, size);
}
-static inline void memunmap_pmem(void __pmem *addr)
+static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
+{
+ devm_memunmap(dev, (void __force *) addr);
+}
+
+static inline bool arch_has_pmem_api(void)
{
- iounmap((void __force __iomem *) addr);
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
/**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
*/
static inline bool arch_has_wmb_pmem(void)
{
- if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
- return __arch_has_wmb_pmem();
- return false;
-}
-
-static inline bool arch_has_pmem_api(void)
-{
- return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+ return arch_has_pmem_api() && __arch_has_wmb_pmem();
}
/*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
* default_memremap_pmem + default_memcpy_to_pmem is sufficient for
* making data durable relative to i/o completion.
*/
-static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t size)
{
memcpy((void __force *) dst, src, size);
}
-static void __pmem *default_memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
{
- return (void __pmem __force *)ioremap_wt(offset, size);
+ if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+ clear_page((void __force *)addr);
+ else
+ memset((void __force *)addr, 0, size);
}
/**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
* wmb_pmem() arrange for the data to be written through the
* cache to persistent media.
*/
-static inline void __pmem *memremap_pmem(resource_size_t offset,
- unsigned long size)
+static inline void __pmem *memremap_pmem(struct device *dev,
+ resource_size_t offset, unsigned long size)
{
- if (arch_has_pmem_api())
- return arch_memremap_pmem(offset, size);
- return default_memremap_pmem(offset, size);
+ return (void __pmem *) devm_memremap(dev, offset, size,
+ ARCH_MEMREMAP_PMEM);
}
/**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
*/
static inline void wmb_pmem(void)
{
- if (arch_has_pmem_api())
+ if (arch_has_wmb_pmem())
arch_wmb_pmem();
+ else
+ wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+ * @bytes: number of bytes to copy
+ * @i: iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ if (arch_has_pmem_api())
+ return arch_copy_from_iter_pmem(addr, bytes, i);
+ return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+ if (arch_has_pmem_api())
+ arch_clear_pmem(addr, size);
+ else
+ default_clear_pmem(addr, size);
}
#endif /* __PMEM_H__ */
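The reworked pmem.h routes everything through arch_has_pmem_api()/arch_has_wmb_pmem() and, per the kernel-doc above, leaves durability ordering to an explicit wmb_pmem() after memcpy_to_pmem(), copy_from_iter_pmem() or clear_pmem(). A minimal sketch of a caller honouring that contract, assuming dst was obtained earlier from memremap_pmem():

#include <linux/pmem.h>
#include <linux/types.h>

/* Copy a buffer into persistent memory and make it durable. */
static void demo_flush_to_pmem(void __pmem *dst, const void *src, size_t len)
{
        memcpy_to_pmem(dst, src, len);  /* copy via the PMEM-aware path */
        wmb_pmem();                     /* order the write before I/O completion */
}

/* Zero a persistent-memory range under the same ordering rule. */
static void demo_zero_pmem(void __pmem *dst, size_t len)
{
        clear_pmem(dst, len);
        wmb_pmem();
}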
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81c5..317e16de0 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -19,8 +19,8 @@
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
@@ -69,10 +69,6 @@
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
-/********** net/ **********/
-#define NEIGHBOR_DEAD 0xdeadbeef
-#define NETFILTER_LINK_POISON 0xdead57ac
-
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
-/********** sound/oss/ **********/
-#define OSS_POISON_FREE 0xAB
-
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index a6298b27a..9729565c2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -404,10 +404,10 @@ do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
@@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#else
+#elif defined(DEBUG)
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+#else
+static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+#endif
#endif
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9c1..a59c6ee56 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
bool device_dma_is_coherent(struct device *dev);
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
#endif /* _LINUX_PROPERTY_H_ */
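The two additions let a driver pull its PHY interface mode and MAC address through the unified property API, whether the description comes from DT or ACPI. A sketch of how a probe path might use them (the driver structure is hypothetical):

#include <linux/etherdevice.h>
#include <linux/property.h>

struct foo_priv {                       /* hypothetical driver state */
        int phy_mode;
        u8 mac[ETH_ALEN];
};

static int foo_eth_get_config(struct device *dev, struct foo_priv *priv)
{
        int phy_mode = device_get_phy_mode(dev);

        if (phy_mode < 0)
                return phy_mode;
        priv->phy_mode = phy_mode;

        /* Fall back to a random address if the firmware provides none. */
        if (!device_get_mac_address(dev, (char *)priv->mac, ETH_ALEN))
                eth_random_addr(priv->mac);

        return 0;
}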
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 00e8e8fa7..5440f64d2 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -33,7 +33,7 @@ struct prop_global {
/*
* global proportion descriptor
*
- * this is needed to consitently flip prop_global structures.
+ * this is needed to consistently flip prop_global structures.
*/
struct prop_descriptor {
int index;
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000..a682fcc91
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
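psci_ops is populated by the firmware driver (from DT via psci_dt_init() or from ACPI via psci_acpi_init()), and arch code calls through it afterwards. A purely illustrative sketch of an SMP bring-up helper, not the actual arm/arm64 code:

#include <linux/errno.h>
#include <linux/psci.h>

static int foo_boot_secondary(unsigned long target_mpidr, unsigned long entry)
{
        if (!psci_ops.cpu_on)
                return -EOPNOTSUPP;

        /* entry must be the physical address of the secondary entry point. */
        return psci_ops.cpu_on(target_mpidr, entry);
}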
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 987a73a40..061265f92 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -34,6 +34,7 @@
#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
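PT_SUSPEND_SECCOMP is the in-kernel mirror of the new PTRACE_O_SUSPEND_SECCOMP option, which lets a privileged tracer (checkpoint/restore is the intended user) temporarily suspend seccomp filtering in the tracee. A hedged userspace sketch; the fallback define assumes the flag value from the matching UAPI change:

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)      /* assumed UAPI value */
#endif

/* 'pid' must already be ptrace-stopped; requires CAP_SYS_ADMIN. */
static long suspend_tracee_seccomp(pid_t pid)
{
        return ptrace(PTRACE_SETOPTIONS, pid, 0,
                      (void *)(long)PTRACE_O_SUSPEND_SECCOMP);
}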
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 36262d08a..d681f6875 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -79,26 +79,43 @@ enum {
PWMF_EXPORTED = 1 << 2,
};
+/**
+ * struct pwm_device - PWM channel object
+ * @label: name of the PWM device
+ * @flags: flags associated with the PWM device
+ * @hwpwm: per-chip relative index of the PWM device
+ * @pwm: global index of the PWM device
+ * @chip: PWM chip providing this PWM device
+ * @chip_data: chip-private data associated with the PWM device
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * @polarity: polarity of the PWM signal
+ */
struct pwm_device {
- const char *label;
- unsigned long flags;
- unsigned int hwpwm;
- unsigned int pwm;
- struct pwm_chip *chip;
- void *chip_data;
-
- unsigned int period; /* in nanoseconds */
- unsigned int duty_cycle; /* in nanoseconds */
- enum pwm_polarity polarity;
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period;
+ unsigned int duty_cycle;
+ enum pwm_polarity polarity;
};
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+ return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
-static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
@@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
@@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
+{
+ return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+}
+
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
@@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
- int (*request)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*free)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip,
- struct pwm_device *pwm);
+ int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip,
- struct seq_file *s);
+ void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
- struct module *owner;
+ struct module *owner;
};
/**
@@ -160,22 +175,24 @@ struct pwm_ops {
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @pwms: array of PWM devices allocated by the framework
+ * @of_xlate: request a PWM device given a device tree PWM specifier
+ * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
* @can_sleep: must be true if the .config(), .enable() or .disable()
* operations may sleep
*/
struct pwm_chip {
- struct device *dev;
- struct list_head list;
- const struct pwm_ops *ops;
- int base;
- unsigned int npwm;
-
- struct pwm_device *pwms;
-
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
- const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
- bool can_sleep;
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
};
#if IS_ENABLED(CONFIG_PWM)
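The const-qualified getters plus pwm_is_enabled() and pwm_get_polarity() let consumers inspect a channel without reaching into the struct. A consumer-side sketch (the backlight framing is hypothetical):

#include <linux/printk.h>
#include <linux/pwm.h>

static void foo_backlight_show_state(const struct pwm_device *pwm)
{
        unsigned int period = pwm_get_period(pwm);
        unsigned int duty = pwm_get_duty_cycle(pwm);

        pr_info("pwm%u: %u/%u ns, %s polarity, %s\n", pwm->hwpwm, duty, period,
                pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED ?
                        "inversed" : "normal",
                pwm_is_enabled(pwm) ? "enabled" : "disabled");
}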
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 0485bab06..92273776b 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
QUARK_X1000_SSP,
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
LPSS_BYT_SSP,
+ LPSS_SPT_SSP,
};
struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca6601f..7a57c28eb 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
return 0;
}
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
{
+ return 0;
}
static inline void dquot_drop(struct inode *inode)
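Since dquot_initialize() can now fail (for example when reading a quota entry from disk errors out), filesystems are expected to propagate the error instead of ignoring a void return. A hedged sketch of the new calling convention:

#include <linux/fs.h>
#include <linux/quotaops.h>

static int foo_prepare_inode_change(struct inode *inode)
{
        int err;

        /* Previously just 'dquot_initialize(inode);' with no return value. */
        err = dquot_initialize(inode);
        if (err)
                return err;

        return 0;
}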
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4cf5f51b4..581abf848 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,36 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+ struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
+ __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
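As a concrete illustration of the kernel-doc example above, a caller that used to run synchronize_rcu() and synchronize_rcu_bh() back to back can overlap the two grace periods (sketch only):

#include <linux/rcupdate.h>

static void foo_retire_old_state(void)
{
        /* Waits for RCU and RCU-bh grace periods concurrently. */
        synchronize_rcu_mult(call_rcu, call_rcu_bh);
}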
/**
* call_rcu_tasks() - Queue an RCU for invocation task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -309,7 +339,7 @@ static inline void rcu_sysrq_end(void)
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
@@ -317,7 +347,7 @@ static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
@@ -392,10 +422,6 @@ bool __rcu_is_watching(void);
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-typedef void call_rcu_func_t(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
@@ -469,46 +495,10 @@ int rcu_read_lock_bh_held(void);
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise. Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section. This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes. This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
*/
#ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
- int lockdep_opinion = 0;
-
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- if (debug_locks)
- lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
@@ -545,6 +535,11 @@ static inline int rcu_read_lock_sched_held(void)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
+
#ifdef CONFIG_PROVE_RCU
/**
@@ -555,17 +550,32 @@ static inline int rcu_read_lock_sched_held(void)
#define rcu_lockdep_assert(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
+ deprecate_rcu_lockdep_assert(); \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s) \
+ do { \
+ static bool __section(.data.unlikely) __warned; \
+ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ __warned = true; \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+ } \
+ } while (0)
+
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -576,15 +586,16 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh read-side critical section"); \
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
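Note the inverted sense: rcu_lockdep_assert(cond, msg) complained when cond was false, while RCU_LOCKDEP_WARN(cond, msg) complains when cond is true, so conversions negate the condition. A sketch of the two styles side by side (the caller is hypothetical):

#include <linux/rcupdate.h>

static void foo_lookup_check(void)
{
        /* Old style, now deprecated: splats when the condition is false. */
        rcu_lockdep_assert(rcu_read_lock_held(),
                           "foo_lookup() needs rcu_read_lock()");

        /* New style: the same check with the condition inverted. */
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "foo_lookup() needs rcu_read_lock()");
}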
@@ -615,13 +626,13 @@ static inline void rcu_preempt_sleep_check(void)
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
@@ -845,8 +856,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -896,8 +907,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +936,8 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
}
/*
@@ -936,8 +947,8 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
@@ -961,8 +972,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +990,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
@@ -1031,7 +1042,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_dereference_sparse(p, __rcu); \
- p = RCU_INITIALIZER(v); \
+ WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3df6c1ec4..ff968b7af 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ might_sleep();
+}
+
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 456879143..5abec82f3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@ void rcu_barrier_bh(void);
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
+unsigned long get_state_synchronize_sched(void);
+void cond_synchronize_sched(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
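These mirror get_state_synchronize_rcu()/cond_synchronize_rcu() for the sched flavor: snapshot the grace-period state, do other work, and block only if a full grace period has not already elapsed. A hedged sketch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int val; };                        /* hypothetical object */

static void foo_do_other_work(void) { }         /* stand-in for real work */

static void foo_reclaim(struct foo *old)
{
        unsigned long gp_state = get_state_synchronize_sched();

        foo_do_other_work();

        /* Sleeps only if no RCU-sched grace period completed meanwhile. */
        cond_synchronize_sched(gp_state);
        kfree(old);
}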
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea0f..8fc0bfd8e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/err.h>
#include <linux/bug.h>
+#include <linux/lockdep.h>
struct module;
struct device;
@@ -50,6 +51,20 @@ struct reg_default {
unsigned int def;
};
+/**
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ */
+struct reg_sequence {
+ unsigned int reg;
+ unsigned int def;
+ unsigned int delay_us;
+};
+
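Unlike struct reg_default, a struct reg_sequence entry can carry a post-write delay, which is what regmap_multi_reg_write() and regmap_register_patch() take after the signature changes further down. A hedged example of a power-up sequence with made-up registers and delays:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence foo_powerup_seq[] = {
        { .reg = 0x01, .def = 0x80 },                   /* enable core */
        { .reg = 0x02, .def = 0x3c, .delay_us = 500 },  /* PLL, settle 500 us */
        { .reg = 0x03, .def = 0x01 },                   /* release reset */
};

static int foo_power_up(struct regmap *map)
{
        return regmap_multi_reg_write(map, foo_powerup_seq,
                                      ARRAY_SIZE(foo_powerup_seq));
}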
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -296,8 +311,12 @@ typedef void (*regmap_hw_free_context)(void *context);
* if not implemented on a given device.
* @async_write: Write operation which completes asynchronously, optional and
* must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. This
+ * write operation has to complete when returning from the function.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
* @async_alloc: Allocate a regmap_async() structure.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
@@ -307,7 +326,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* @val_format_endian_default: Default endianness for formatted register
* values. Used when the regmap_config specifies DEFAULT. If this is
* DEFAULT, BIG is assumed.
- * @async_size: Size of struct used for async work.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
*/
struct regmap_bus {
bool fast_io;
@@ -322,47 +342,186 @@ struct regmap_bus {
u8 read_flag_mask;
enum regmap_endian reg_format_endian_default;
enum regmap_endian val_format_endian_default;
+ size_t max_raw_read;
+ size_t max_raw_write;
};
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ **/
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ fn(__VA_ARGS__, &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" name ")->lock"); \
+ }) \
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__regmap_init, #config, \
+ dev, bus, bus_context, config)
int regmap_attach_dev(struct device *dev, struct regmap *map,
- const struct regmap_config *config);
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
-
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spi(struct spi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config);
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config);
+ const struct regmap_config *config);
-bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
+ i2c, config)
+
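The switch from exported functions to these macros is transparent to callers; each expansion just gains a unique lockdep class and name per call site. A typical I2C probe therefore still reads as before (sketch with a made-up config, using the devm_ variant defined later in this patch):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = 0x7f,
};

static int foo_i2c_probe(struct i2c_client *i2c)
{
        struct regmap *map;

        /* Expands through __regmap_lockdep_wrapper() behind the scenes. */
        map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
        return PTR_ERR_OR_ZERO(map);
}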
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * regmap_init_mmio_clk(): Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* regmap_init_mmio(): Initialise register map
@@ -374,12 +533,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-static inline struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define regmap_init_mmio(dev, regs, config) \
+ regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
+ ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * devm_regmap_init(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
+ dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c(): Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
/**
* devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +648,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev,
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
-{
- return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
-}
+#define devm_regmap_init_mmio(dev, regs, config) \
+ devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97(): Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
+ ac97, config)
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +676,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_multi_reg_write_bypassed(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
int num_regs);
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
@@ -424,6 +690,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -437,6 +705,8 @@ int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -450,7 +720,7 @@ void regcache_mark_dirty(struct regmap *map);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs);
int regmap_parse_val(struct regmap *map, const void *buf,
unsigned int *val);
@@ -503,6 +773,8 @@ int regmap_field_update_bits(struct regmap_field *field,
int regmap_fields_write(struct regmap_field *field, unsigned int id,
unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val);
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -645,6 +917,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_update_bits_async(struct regmap *map,
unsigned int reg,
unsigned int mask, unsigned int val)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689ed6..9e0e76992 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
{
return 0;
}
+
+static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
+{
+ return -EINVAL;
+}
+
#endif
+static inline int regulator_set_voltage_triplet(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
+ return 0;
+
+ return regulator_set_voltage(regulator, min_uV, max_uV);
+}
+
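The helper tries the narrow (target..max) window first and falls back to the wider (min..max) window, which matches how OPP-style consumers express a preferred voltage with a tolerated range. A hedged consumer sketch:

#include <linux/regulator/consumer.h>

static int foo_set_opp_voltage(struct regulator *vdd)
{
        /* 0.90 V nominal, tolerating anything from 0.85 V to 0.95 V. */
        return regulator_set_voltage_triplet(vdd, 850000, 900000, 950000);
}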
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65acc2..a43a5ca11 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
/*
- * da9211.h - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
enum da9211_chip_id {
DA9211,
DA9213,
+ DA9215,
};
struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe48..45932228c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+ int (*set_over_current_protection) (struct regulator_dev *);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260..a1067d0b3 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned over_current_protection:1; /* auto disable on over current */
};
/**
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000..847325939
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6311_H
+#define __LINUX_REGULATOR_MT6311_H
+
+#define MT6311_MAX_REGULATORS 2
+
+enum {
+ MT6311_ID_VDVFS = 0,
+ MT6311_ID_VBIASN,
+};
+
+#define MT6311_E1_CID_CODE 0x10
+#define MT6311_E2_CID_CODE 0x20
+#define MT6311_E3_CID_CODE 0x30
+
+#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/reset.h b/include/linux/reset.h
index da5602bd7..7f65f9cff 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
return -ENOSYS;
}
+static inline struct reset_control *__must_check reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+}
+
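With these stubs a driver can call the reset API unconditionally; when CONFIG_RESET_CONTROLLER is disabled the calls WARN once and hand back an error pointer the caller already has to check. Sketch of the usual pattern (names hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_hw_init(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get(dev, "core");
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        return reset_control_deassert(rst);
}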
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a11..29446aeef 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ * and caller guarantees they will
+ * do a final flush if necessary */
};
#ifdef CONFIG_MMU
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c82..556ec1ea2 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
- BUG();
-#endif
-
/*
* offset and length are unused for chain entry. Clear them.
*/
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
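sg_split() carves an already-mapped scatterlist into several newly allocated lists at caller-chosen byte offsets, filling one out[]/out_mapped_nents[] slot per split. A hedged sketch that separates a header from its payload:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int foo_split_header(struct scatterlist *in, int in_mapped_nents,
                            size_t hdr_len, size_t payload_len,
                            struct scatterlist *out[2], int out_nents[2])
{
        const size_t sizes[2] = { hdr_len, payload_len };

        /* sg_split() allocates the output tables; the caller kfree()s them. */
        return sg_split(in, in_mapped_nents, 0, 2, sizes,
                        out, out_nents, GFP_KERNEL);
}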
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bfca8aa21..0c16e0292 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -339,8 +339,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
-extern int runqueue_is_locked(int cpu);
-
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
@@ -530,39 +528,49 @@ struct cpu_itimer {
};
/**
- * struct cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
+ * @lock: protects the above two fields
*
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
*/
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
cputime_t utime;
cputime_t stime;
+ raw_spinlock_t lock;
+#endif
};
+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ prev->utime = prev->stime = 0;
+ raw_spin_lock_init(&prev->lock);
+#endif
+}
+
/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups. Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
*/
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
+
/* Alternate field names when used to cache expirations. */
-#define prof_exp stime
#define virt_exp utime
+#define prof_exp stime
#define sched_exp sum_exec_runtime
#define INIT_CPUTIME \
@@ -715,9 +723,7 @@ struct signal_struct {
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
-#endif
+ struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
@@ -1179,29 +1185,24 @@ struct load_weight {
u32 inv_weight;
};
+/*
+ * The load_avg/util_avg accumulates an infinite geometric series.
+ * 1) load_avg factors the amount of time that a sched_entity is
+ * runnable on a rq into its weight. For cfs_rq, it is the aggregated
+ * such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency scaling into the amount of time
+ * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
+ * For cfs_rq, it is the aggregated such times of all runnable and
+ * blocked sched_entities.
+ * The 64 bit load_sum can:
+ * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
+ * the highest weight (=88761) always runnable, we should not overflow
+ * 2) for entity, support any load.weight always runnable
+ */
struct sched_avg {
- u64 last_runnable_update;
- s64 decay_count;
- /*
- * utilization_avg_contrib describes the amount of time that a
- * sched_entity is running on a CPU. It is based on running_avg_sum
- * and is scaled in the range [0..SCHED_LOAD_SCALE].
- * load_avg_contrib described the amount of time that a sched_entity
- * is runnable on a rq. It is based on both runnable_avg_sum and the
- * weight of the task.
- */
- unsigned long load_avg_contrib, utilization_avg_contrib;
- /*
- * These sums represent an infinite geometric series and so are bound
- * above by 1024/(1-y). Thus we only need a u32 to store them for all
- * choices of y < 1-2^(-32)*1024.
- * running_avg_sum reflects the time that the sched_entity is
- * effectively running on the CPU.
- * runnable_avg_sum represents the amount of time a sched_entity is on
- * a runqueue which includes the running time that is monitored by
- * running_avg_sum.
- */
- u32 runnable_avg_sum, avg_period, running_avg_sum;
+ u64 last_update_time, load_sum;
+ u32 util_sum, period_contrib;
+ unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
@@ -1267,7 +1268,7 @@ struct sched_entity {
#endif
#ifdef CONFIG_SMP
- /* Per-entity load-tracking */
+ /* Per entity load average tracking */
struct sched_avg avg;
#endif
};
@@ -1353,6 +1354,25 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+ */
+ struct cpumask cpumask;
+
+ /* True if any bit in cpumask is set */
+ bool flush_required;
+
+ /*
+ * If true then the PTE was dirty when unmapped. The entry must be
+ * flushed before IO is initiated or a stale TLB entry potentially
+ * allows an update without redirtying the page.
+ */
+ bool writable;
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1360,22 +1380,41 @@ struct task_struct {
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS)
+ int on_cpu;
+#endif
#ifdef CONFIG_SMP
struct llist_node wake_entry;
- int on_cpu;
- struct task_struct *last_wakee;
- unsigned long wakee_flips;
+ unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
int wake_cpu;
#endif
int on_rq;
-
int prio, static_prio, normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+ int time_slice;
+ u64 deadline;
+ struct list_head run_list;
+ u64 last_ran;
+ u64 sched_time; /* sched_clock time spent running */
+#ifdef CONFIG_SMT_NICE
+ int smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+#ifdef CONFIG_SMP
+ bool sticky; /* Soft affined flag */
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ bool zerobound; /* Bound to CPU0 for hotplug */
+#endif
+ unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
@@ -1492,10 +1531,11 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
- cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- struct cputime prev_cputime;
+#ifdef CONFIG_SCHED_BFS
+ unsigned long utime_pc, stime_pc;
#endif
+ cputime_t gtime;
+ struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
@@ -1711,6 +1751,10 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
struct rcu_head rcu;
/*
@@ -1804,6 +1848,63 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifdef CONFIG_SCHED_BFS
+bool grunqueue_is_locked(void);
+void grq_unlock_wait(void);
+void cpu_scaling(int cpu);
+void cpu_nonscaling(int cpu);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+static inline int runqueue_is_locked(int cpu)
+{
+ return grunqueue_is_locked();
+}
+
+void print_scheduler_version(void);
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return (p->policy == SCHED_ISO);
+}
+#else /* CFS */
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+ p->nr_cpus_allowed = current->nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return false;
+}
+
+/* Anyone feel like implementing this? */
+static inline bool above_background_load(void)
+{
+ return false;
+}
+#endif /* CONFIG_SCHED_BFS */
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -2226,13 +2327,6 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
- return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2298,7 +2392,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
extern void sched_exec(void);
#else
#define sched_exec() {}
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index d9cf5a576..7d5d0b861 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -19,8 +19,20 @@
*/
#define MAX_USER_RT_PRIO 100
+
+#ifdef CONFIG_SCHED_BFS
+/* Note different MAX_RT_PRIO */
+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
+
+#define ISO_PRIO (MAX_RT_PRIO)
+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define IDLE_PRIO (MAX_RT_PRIO + 2)
+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_BFS */
#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#endif /* CONFIG_SCHED_BFS */
+
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index a19ddacda..f4265039a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
static inline int seccomp_mode(struct seccomp *s)
{
- return 0;
+ return SECCOMP_MODE_DISABLED;
}
#endif /* CONFIG_SECCOMP */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index d4c727138..dde00defb 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -114,13 +114,22 @@ int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
-int seq_escape(struct seq_file *, const char *, const char *);
-int seq_putc(struct seq_file *m, char c);
-int seq_puts(struct seq_file *m, const char *s);
int seq_write(struct seq_file *seq, const void *data, size_t len);
-__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
-__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
+__printf(2, 0)
+void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
+__printf(2, 3)
+void seq_printf(struct seq_file *m, const char *fmt, ...);
+void seq_putc(struct seq_file *m, char c);
+void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+ unsigned long long num);
+void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_escape(struct seq_file *m, const char *s, const char *esc);
+
+void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, const void *buf, size_t len,
+ bool ascii);
int seq_path(struct seq_file *, const struct path *, const char *);
int seq_file_path(struct seq_file *, struct file *, const char *);
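seq_printf(), seq_puts() and friends now return void (callers that care about truncation can use seq_has_overflowed()), and seq_hex_dump() saves /proc and debugfs files from open-coding a hex dump. A hedged show() sketch:

#include <linux/printk.h>
#include <linux/seq_file.h>

/* Sketch of a show() callback using the now-void helpers. */
static int foo_show(struct seq_file *m, void *v)
{
        static const u8 foo_regs[8] = { 0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3 };

        seq_puts(m, "foo registers:\n");
        seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 16, 1,
                     foo_regs, sizeof(foo_regs), true);
        seq_printf(m, "%zu bytes dumped\n", sizeof(foo_regs));
        return 0;       /* overflow handling is left to the seq_file core */
}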
@@ -134,10 +143,6 @@ int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
-int seq_put_decimal_ull(struct seq_file *m, char delimiter,
- unsigned long long num);
-int seq_put_decimal_ll(struct seq_file *m, char delimiter,
- long long num);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
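
The switch from int to void return types means callers can no longer chain error checks on seq_printf()/seq_puts(); overflow is typically checked with seq_has_overflowed() where it matters. A minimal sketch of a show routine against the new prototypes (names and values are illustrative):

#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/types.h>

static int demo_show(struct seq_file *m, void *v)
{
        u8 blob[8] = { 0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3 };

        seq_puts(m, "demo stats\n");
        seq_printf(m, "entries: %d\n", 42);
        seq_put_decimal_ull(m, ' ', 12345ULL);
        seq_putc(m, '\n');
        /* new helper declared above: hex dump with an offset prefix */
        seq_hex_dump(m, "blob: ", DUMP_PREFIX_OFFSET, 16, 1,
                     blob, sizeof(blob), true);
        return 0;
}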
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ba82c07fe..0d4ef6921 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -136,8 +136,6 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
-extern void serial8250_early_out(struct uart_port *port, int offset, int value);
extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
@@ -152,6 +150,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
+void serial8250_init_port(struct uart_8250_port *up);
+void serial8250_set_defaults(struct uart_8250_port *up);
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count, unsigned int loglevel);
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7a2..df4ab5de1 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
#include <linux/mod_devicetable.h>
#include <uapi/linux/serio.h>
+extern struct bus_type serio_bus;
+
struct serio {
void *port_data;
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba502c..d927647e6 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@ void shdma_cleanup(struct shdma_dev *sdev);
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ return false;
+}
#endif
#endif
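
Turning the !CONFIG_SH_DMAE_BASE stub into a real inline (instead of a NULL define) keeps filter-based call sites compiling unconditionally. A rough sketch of such a caller; the slave-id cast follows common dmaengine convention and is not taken from this header:

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static struct dma_chan *demo_request_shdma(int slave_id)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        /* shdma_chan_filter is now always a callable symbol */
        return dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)slave_id);
}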
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 275158803..439841123 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
+#include <net/flow.h>
/* A. Checksumming of received packets by device.
*
@@ -173,17 +174,24 @@ struct nf_bridge_info {
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
} orig_proto:8;
- bool pkt_otherhost;
+ u8 pkt_otherhost:1;
+ u8 in_prerouting:1;
+ u8 bridged_dnat:1;
__u16 frag_max_size;
- unsigned int mask;
struct net_device *physindev;
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
+ struct net_device *physoutdev;
union {
- struct net_device *physoutdev;
- char neigh_header[8];
- };
- union {
+ /* prerouting: detect dnat in orig/reply direction */
__be32 ipv4_daddr;
struct in6_addr ipv6_daddr;
+
+ /* after prerouting + nat detected: store original source
+ * mac since neigh resolution overwrites it, only used while
+ * skb is out in neigh layer.
+ */
+ char neigh_header[8];
};
};
#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
+ * @offload_fwd_mark: fwding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
+ union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
+ __u32 secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ __u32 offload_fwd_mark;
#endif
+ };
+
union {
__u32 mark;
__u32 reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+static inline void skb_clear_hash(struct sk_buff *skb)
{
- skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->hash = 0;
skb->sw_hash = 0;
+ skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_hash)
+ skb_clear_hash(skb);
+}
+
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+ skb->l4_hash = is_l4;
+ skb->sw_hash = is_sw;
skb->hash = hash;
}
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ /* Used by drivers to set hash from HW */
+ __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+ __skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen,
+ unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, unsigned int flags)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen,
+ unsigned int flags)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen, flags);
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
return skb->hash;
}
-static inline void skb_clear_hash(struct sk_buff *skb)
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
- skb->hash = 0;
- skb->sw_hash = 0;
- skb->l4_hash = 0;
+ if (!skb->l4_hash && !skb->sw_hash) {
+ struct flow_keys keys;
+ __u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+
+ return skb->hash;
}
-static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
- if (!skb->l4_hash)
- skb_clear_hash(skb);
+ return skb->hash;
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1943,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect_flow_keys(skb, &keys))
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2670,12 +2776,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
skb_shinfo(skb)->frag_list = NULL;
}
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
- frag->next = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = frag;
-}
-
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@ -3467,5 +3567,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
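
The reworked hash and flow-dissector helpers are typically used along these lines; a hedged sketch only, leaning on the keys.control.thoff access already used elsewhere in this patch and on an IPv6 flow key that the caller has filled in:

#include <linux/skbuff.h>
#include <net/flow.h>

static void demo_classify(struct sk_buff *skb, const struct flowi6 *fl6)
{
        struct flow_keys keys;

        /* dissect into generic flow keys; flags == 0 for default behaviour */
        if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                skb_set_transport_header(skb, keys.control.thoff);

        /* reuse an existing L4 hash, or derive one from the IPv6 flow */
        skb_get_hash_flowi6(skb, fl6);
}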
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e524..7e37d448e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@ void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
void kmem_cache_free(struct kmem_cache *, void *);
+/*
+ * Bulk allocation and freeing operations. These are accelerated in an
+ * allocator-specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
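
A minimal sketch of how the new bulk API pairs up (the cache and object count are illustrative; note the interrupts-enabled requirement stated in the comment above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int demo_bulk(struct kmem_cache *cache)
{
        void *objs[16];

        /* declared above to return bool; treated here as all-or-nothing */
        if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
                return -ENOMEM;

        /* ... use objs[] ... */

        kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
        return 0;
}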
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593f9..e6109a6cd 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@ struct smp_hotplug_thread {
const char *thread_comm;
};
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *cpumask);
+
+static inline int
+smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+ return smpboot_register_percpu_thread_cpumask(plug_thread,
+ cpu_possible_mask);
+}
+
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
const struct cpumask *);
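
A sketch of the new registration flow; the hotplug-thread callbacks, the per-cpu task pointer and the chosen cpumask are placeholders, and any subset of cpu_possible_mask could be passed instead:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu) { return 0; }
static void demo_thread_fn(unsigned int cpu) { }

static struct smp_hotplug_thread demo_threads = {
        .store                  = &demo_task,
        .thread_should_run      = demo_should_run,
        .thread_fn              = demo_thread_fn,
        .thread_comm            = "demo/%u",
};

static int __init demo_init(void)
{
        /* restrict the per-cpu threads to a subset of CPUs */
        return smpboot_register_percpu_thread_cpumask(&demo_threads,
                                                      cpu_online_mask);
}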
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000..9c99f84bc
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
+#ifndef LINUX_SOC_DOVE_PMU_H
+#define LINUX_SOC_DOVE_PMU_H
+
+int dove_init_pmu(void);
+
+#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 000000000..a5714e93f
--- /dev/null
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
+#ifndef __SOC_MEDIATEK_INFRACFG_H
+#define __SOC_MEDIATEK_INFRACFG_H
+
+#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
+#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
+#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
+#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6)
+#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12)
+#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13)
+#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16)
+#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17)
+#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18)
+#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19)
+#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
+#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
+
+#endif /* __SOC_MEDIATEK_INFRACFG_H */
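
These helpers are meant to be driven from the MT8173 power-domain code; a hedged sketch of a caller follows, where the syscon lookup and the particular protection mask are assumptions, not taken from this header:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/soc/mediatek/infracfg.h>

static int demo_protect_mm(void)
{
        u32 mask = MT8173_TOP_AXI_PROT_EN_MM_M0 | MT8173_TOP_AXI_PROT_EN_MM_M1;
        struct regmap *infracfg;
        int ret;

        infracfg = syscon_regmap_lookup_by_compatible("mediatek,mt8173-infracfg");
        if (IS_ERR(infracfg))
                return PTR_ERR(infracfg);

        ret = mtk_infracfg_set_bus_protection(infracfg, mask);
        if (ret)
                return ret;
        /* ... toggle the power domain ... */
        return mtk_infracfg_clear_bus_protection(infracfg, mask);
}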
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 000000000..2a53dcaee
--- /dev/null
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
+#ifndef __QCOM_SMD_RPM_H__
+#define __QCOM_SMD_RPM_H__
+
+struct qcom_smd_rpm;
+
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+
+/*
+ * Constants used for addressing resources in the RPM.
+ */
+#define QCOM_SMD_RPM_BOOST 0x61747362
+#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
+#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
+#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
+#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
+#define QCOM_SMD_RPM_LDOA 0x616f646c
+#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
+#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
+#define QCOM_SMD_RPM_NCPA 0x6170636E
+#define QCOM_SMD_RPM_NCPB 0x6270636E
+#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
+#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_SMPA 0x61706d73
+#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SPDM 0x63707362
+#define QCOM_SMD_RPM_VSA 0x00617376
+
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 resource_type, u32 resource_id,
+ void *buf, size_t count);
+
+#endif
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000..d7e50aa6a
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,46 @@
+#ifndef __QCOM_SMD_H__
+#define __QCOM_SMD_H__
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct qcom_smd;
+struct qcom_smd_channel;
+struct qcom_smd_lookup;
+
+/**
+ * struct qcom_smd_device - smd device struct
+ * @dev: the device struct
+ * @channel: handle to the smd channel for this device
+ */
+struct qcom_smd_device {
+ struct device dev;
+ struct qcom_smd_channel *channel;
+};
+
+/**
+ * struct qcom_smd_driver - smd driver struct
+ * @driver: underlying device driver
+ * @probe: invoked when the smd channel is found
+ * @remove: invoked when the smd channel is closed
+ * @callback: invoked when an inbound message is received on the channel,
+ * should return 0 on success or -EBUSY if the data cannot be
+ * consumed at this time
+ */
+struct qcom_smd_driver {
+ struct device_driver driver;
+ int (*probe)(struct qcom_smd_device *dev);
+ void (*remove)(struct qcom_smd_device *dev);
+ int (*callback)(struct qcom_smd_device *, const void *, size_t);
+};
+
+int qcom_smd_driver_register(struct qcom_smd_driver *drv);
+void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+
+#define module_qcom_smd_driver(__smd_driver) \
+ module_driver(__smd_driver, qcom_smd_driver_register, \
+ qcom_smd_driver_unregister)
+
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#endif
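
Everything needed to hook a driver onto an SMD channel is declared above; a skeletal sketch, with names and payload handling as placeholders:

#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int demo_smd_probe(struct qcom_smd_device *sdev)
{
        static const char ping[] = "hello";

        /* the channel is already open here; send an initial message */
        return qcom_smd_send(sdev->channel, ping, sizeof(ping));
}

static void demo_smd_remove(struct qcom_smd_device *sdev)
{
}

static int demo_smd_callback(struct qcom_smd_device *sdev,
                             const void *data, size_t len)
{
        /* consume the inbound message; return -EBUSY to retry later */
        return 0;
}

static struct qcom_smd_driver demo_smd_driver = {
        .probe          = demo_smd_probe,
        .remove         = demo_smd_remove,
        .callback       = demo_smd_callback,
        .driver = {
                .name   = "demo_smd",
        },
};
module_qcom_smd_driver(demo_smd_driver);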
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000..bc9630d3a
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d67307234..6b00f18f5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
#include <linux/scatterlist.h>
struct dma_chan;
+struct spi_master;
+struct spi_transfer;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,59 @@ struct dma_chan;
extern struct bus_type spi_bus_type;
/**
+ * struct spi_statistics - statistics for spi transfers
+ * @lock: lock protecting this structure
+ *
+ * @messages: number of spi-messages handled
+ * @transfers: number of spi_transfers handled
+ * @errors: number of errors during spi_transfer
+ * @timedout: number of timeouts during spi_transfer
+ *
+ * @spi_sync: number of times spi_sync is used
+ * @spi_sync_immediate:
+ * number of times spi_sync is executed immediately
+ * in calling context without queuing and scheduling
+ * @spi_async: number of times spi_async is used
+ *
+ * @bytes: number of bytes transferred to/from device
+ * @bytes_tx: number of bytes sent to device
+ * @bytes_rx: number of bytes received from device
+ *
+ */
+struct spi_statistics {
+ spinlock_t lock; /* lock for the whole structure */
+
+ unsigned long messages;
+ unsigned long transfers;
+ unsigned long errors;
+ unsigned long timedout;
+
+ unsigned long spi_sync;
+ unsigned long spi_sync_immediate;
+ unsigned long spi_async;
+
+ unsigned long long bytes;
+ unsigned long long bytes_rx;
+ unsigned long long bytes_tx;
+
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+ struct spi_transfer *xfer,
+ struct spi_master *master);
+
+#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
+ do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&(stats)->lock, flags); \
+ (stats)->field += count; \
+ spin_unlock_irqrestore(&(stats)->lock, flags); \
+ } while (0)
+
+#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
+ SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+
+/**
* struct spi_device - Master side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @master: SPI controller used with the device.
@@ -60,6 +115,8 @@ extern struct bus_type spi_bus_type;
* @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
* not using a GPIO line)
*
+ * @statistics: statistics for the spi_device
+ *
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
*
@@ -98,6 +155,9 @@ struct spi_device {
char modalias[SPI_NAME_SIZE];
int cs_gpio; /* chip select gpio */
+ /* the statistics */
+ struct spi_statistics statistics;
+
/*
* likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -296,6 +356,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
+ * @statistics: statistics for the spi_master
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +513,9 @@ struct spi_master {
/* gpio chip select */
int *cs_gpios;
+ /* statistics */
+ struct spi_statistics statistics;
+
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
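
Core code and drivers bump these counters through the helper macros; something along these lines, with the field choice purely illustrative:

#include <linux/spi/spi.h>

static void demo_account_xfer(struct spi_master *master,
                              struct spi_transfer *xfer, bool failed)
{
        if (failed)
                SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, errors);
        else
                SPI_STATISTICS_ADD_TO_FIELD(&master->statistics, bytes,
                                            xfer->len);
}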
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24b4..47dd0cebd 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,16 +130,6 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifndef smp_mb__after_unlock_lock
-#define smp_mb__after_unlock_lock() do { } while (0)
-#endif
-
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
@@ -296,7 +286,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
@@ -307,17 +297,17 @@ do { \
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
@@ -337,7 +327,7 @@ do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -352,32 +342,32 @@ do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
@@ -387,22 +377,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91..eead8ab93 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
- void *custom_cfg;
- void *custom_data;
void *bsp_priv;
};
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
- int has_gmac;
- int enh_desc;
- int tx_coe;
- int rx_coe;
- int bugged_jumbo;
- int pmt;
- int riwt_off;
- void (*fix_mac_speed)(void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *(*setup)(struct platform_device *pdev);
- void (*free)(struct platform_device *pdev, void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
-};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index d2abbdb8c..414d92431 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -112,25 +112,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
*
* This can be thought of as a very heavy write lock, equivalent to
* grabbing every spinlock in the kernel. */
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called. Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
-
#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
return ret;
}
-static inline int stop_machine(int (*fn)(void *), void *data,
- const struct cpumask *cpus)
-{
- return __stop_machine(fn, data, cpus);
-}
-
-static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
- return __stop_machine(fn, data, cpus);
+ return stop_machine(fn, data, cpus);
}
#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
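
cpu_stop_fn_t is the existing int (*)(void *) callback type, so converted callers keep the same shape; a minimal sketch:

#include <linux/stop_machine.h>

struct demo_ctx {
        int new_value;
};

/* runs while every other online CPU spins with interrupts disabled */
static int demo_apply(void *data)
{
        struct demo_ctx *ctx = data;

        /* flip some global state atomically with respect to all CPUs */
        (void)ctx;
        return 0;
}

static int demo_switch(struct demo_ctx *ctx)
{
        return stop_machine(demo_apply, ctx, NULL); /* NULL = any online cpu */
}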
diff --git a/include/linux/string.h b/include/linux/string.h
index a8d90db9c..9ef7795e6 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
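
strscpy() returns the number of characters copied, or -E2BIG when the source did not fit, which is what distinguishes it from strlcpy(); a brief sketch:

#include <linux/string.h>

static int demo_copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_size);

        if (n < 0)      /* -E2BIG: truncated, but still NUL-terminated */
                return n;
        return 0;
}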
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 71f711db4..dabe643eb 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_HEX 0x20
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
- unsigned int flags, const char *esc);
+ unsigned int flags, const char *only);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char *dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *only)
{
- return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
+ return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
}
static inline int string_escape_str(const char *src, char *dst, size_t sz,
- unsigned int flags, const char *esc)
+ unsigned int flags, const char *only)
{
- return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
+ return string_escape_mem(src, strlen(src), dst, sz, flags, only);
}
static inline int string_escape_str_any_np(const char *src, char *dst,
- size_t sz, const char *esc)
+ size_t sz, const char *only)
{
- return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
#endif
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53be..5c9c6cd08 100644
--- a/include/linux/sunrpc/addr.h
+++ b/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@ static inline void rpc_set_port(struct sockaddr *sap,
#define IPV6_SCOPE_DELIMITER '%'
#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
return true;
}
#else /* !(IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
const struct sockaddr *sap2)
{
return false;
@@ -122,15 +122,28 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
if (sap1->sa_family == sap2->sa_family) {
switch (sap1->sa_family) {
case AF_INET:
- return __rpc_cmp_addr4(sap1, sap2);
+ return rpc_cmp_addr4(sap1, sap2);
case AF_INET6:
- return __rpc_cmp_addr6(sap1, sap2);
+ return rpc_cmp_addr6(sap1, sap2);
}
}
return false;
}
/**
+ * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ */
+static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (!rpc_cmp_addr(sap1, sap2))
+ return false;
+ return rpc_get_port(sap1) == rpc_get_port(sap2);
+}
+
+/**
* rpc_copy_addr - copy the address portion of one sockaddr to another
* @dst: destination sockaddr
* @src: source sockaddr
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index a7cbb570c..1ecf13e14 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
+#include <linux/utsname.h>
-/* size of the nodename buffer */
-#define UNX_MAXNODENAME 32
+/*
+ * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
+ * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
+ */
+#define UNX_MAXNODENAME __NEW_UTS_LEN
struct rpcsec_gss_info;
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6c4..03d3b4c92 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
*
*/
struct cache_head {
- struct cache_head * next;
+ struct hlist_node cache_list;
time_t expiry_time; /* After this time, don't use the data */
time_t last_refresh; /* If CACHE_PENDING, this is when upcall
* was sent, else this is when update was received
@@ -73,7 +73,7 @@ struct cache_detail_pipefs {
struct cache_detail {
struct module * owner;
int hash_size;
- struct cache_head ** hash_table;
+ struct hlist_head * hash_table;
rwlock_t hash_lock;
atomic_t inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@ extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
umode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+/* Must store cache_detail in seq_file->private if using next three functions */
+extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop(struct seq_file *file, void *p);
+
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb947..cc0fc712b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
#include <linux/wait.h>
#include <linux/mm.h>
-/*
- * This is the RPC server thread function prototype
- */
-typedef int (*svc_thread_fn)(void *);
-
/* statistics for svc_pool structures */
struct svc_pool_stats {
atomic_long_t packets;
@@ -54,6 +49,25 @@ struct svc_pool {
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+struct svc_serv;
+
+struct svc_serv_ops {
+ /* Callback to use when last thread exits. */
+ void (*svo_shutdown)(struct svc_serv *, struct net *);
+
+ /* function for service threads to run */
+ int (*svo_function)(void *);
+
+ /* queue up a transport for servicing */
+ void (*svo_enqueue_xprt)(struct svc_xprt *);
+
+ /* set up thread (or whatever) execution context */
+ int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+
+ /* optional module to count when adding threads (pooled svcs only) */
+ struct module *svo_module;
+};
+
/*
* RPC service.
*
@@ -85,16 +99,7 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
-
- void (*sv_shutdown)(struct svc_serv *serv,
- struct net *net);
- /* Callback to use when last thread
- * exits.
- */
-
- struct module * sv_module; /* optional module to count when
- * adding threads */
- svc_thread_fn sv_function; /* main function for threads */
+ struct svc_serv_ops *sv_ops; /* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
@@ -423,19 +428,46 @@ struct svc_procedure {
};
/*
+ * Mode for mapping cpus to pools.
+ */
+enum {
+ SVC_POOL_AUTO = -1, /* choose one of the others */
+ SVC_POOL_GLOBAL, /* no mapping, just a single global pool
+ * (legacy & UP mode) */
+ SVC_POOL_PERCPU, /* one pool per cpu */
+ SVC_POOL_PERNODE /* one pool per numa node */
+};
+
+struct svc_pool_map {
+ int count; /* How many svc_servs use us */
+ int mode; /* Note: int not enum to avoid
+ * warnings about "enumeration value
+ * not handled in switch" */
+ unsigned int npools;
+ unsigned int *pool_to; /* maps pool id to cpu or node */
+ unsigned int *to_pool; /* maps cpu or node to pool id */
+};
+
+extern struct svc_pool_map svc_pool_map;
+
+/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net));
+ struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+ struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool, int node);
+void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
+unsigned int svc_pool_map_get(void);
+void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv *, struct net *net),
- svc_thread_fn, struct module *);
+ struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
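
Services now bundle their callbacks into a struct svc_serv_ops instead of passing them individually to svc_create_pooled(); a sketch of what a pooled registration might look like, with the names and the program struct as placeholders:

#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

static void demo_shutdown(struct svc_serv *serv, struct net *net) { }
static int demo_thread(void *data) { return 0; }

static struct svc_serv_ops demo_sv_ops = {
        .svo_shutdown           = demo_shutdown,
        .svo_function           = demo_thread,
        .svo_enqueue_xprt       = svc_xprt_do_enqueue,
        .svo_module             = THIS_MODULE,
};

/* previously: svc_create_pooled(prog, bufsize, shutdown, fn, THIS_MODULE) */
static struct svc_serv *demo_create(struct svc_program *prog,
                                    unsigned int bufsize)
{
        return svc_create_pooled(prog, bufsize, &demo_sv_ops);
}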
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 4929a8a9f..7ccc961f3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,6 +132,7 @@ struct svcxprt_rdma {
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
int sc_max_sge;
+ int sc_max_sge_rd; /* max sge for read target */
int sc_sq_depth; /* Depth of SQ */
atomic_t sc_sq_count; /* Number of SQ WR on queue */
@@ -208,6 +209,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);
+extern struct rpcrdma_read_chunk *
+ svc_rdma_get_read_chunk(struct rpcrdma_msg *);
/* svc_rdma_transport.c */
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -220,7 +223,6 @@ extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
-extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
@@ -233,83 +235,4 @@ extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
-/*
- * Returns the address of the first read chunk or <nul> if no read chunk is
- * present
- */
-static inline struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *ch =
- (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
- if (ch->rc_discrim == 0)
- return NULL;
-
- return ch;
-}
-
-/*
- * Returns the address of the first read write array element or <nul> if no
- * write array list is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
- if (rmsgp->rm_body.rm_chunks[0] != 0
- || rmsgp->rm_body.rm_chunks[1] == 0)
- return NULL;
-
- return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
-
-/*
- * Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
-{
- struct rpcrdma_read_chunk *rch;
- struct rpcrdma_write_array *wr_ary;
- struct rpcrdma_write_array *rp_ary;
-
- /* XXX: Need to fix when reply list may occur with read-list and/or
- * write list */
- if (rmsgp->rm_body.rm_chunks[0] != 0 ||
- rmsgp->rm_body.rm_chunks[1] != 0)
- return NULL;
-
- rch = svc_rdma_get_read_chunk(rmsgp);
- if (rch) {
- while (rch->rc_discrim)
- rch++;
-
- /* The reply list follows an empty write array located
- * at 'rc_position' here. The reply array is at rc_target.
- */
- rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-
- goto found_it;
- }
-
- wr_ary = svc_rdma_get_write_array(rmsgp);
- if (wr_ary) {
- rp_ary = (struct rpcrdma_write_array *)
- &wr_ary->
- wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
- goto found_it;
- }
-
- /* No read list, no write list */
- rp_ary = (struct rpcrdma_write_array *)
- &rmsgp->rm_body.rm_chunks[2];
-
- found_it:
- if (rp_ary->wc_discrim == 0)
- return NULL;
-
- return rp_ary;
-}
#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f3d..78512cfe1 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
int svc_create_xprt(struct svc_serv *, const char *, struct net *,
const int, const unsigned short, int);
+void svc_xprt_do_enqueue(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b17613052..b7b279b54 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,7 +49,7 @@
* a single chunk type per message is supported currently.
*/
#define RPCRDMA_MIN_SLOT_TABLE (2U)
-#define RPCRDMA_DEF_SLOT_TABLE (32U)
+#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9cdd0be25..f2eb1893e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -354,7 +354,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
-extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+ /* root ? */
+ if (mem_cgroup_disabled() || !memcg->css.parent)
+ return vm_swappiness;
+
+ return memcg->swappiness;
+}
+
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -376,9 +384,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio, int err);
+extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- void (*end_write_func)(struct bio *, int));
+ bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
@@ -401,6 +409,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
+extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr,
+ bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
@@ -436,6 +447,7 @@ extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int page_swapcount(struct page *);
+extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
@@ -529,6 +541,11 @@ static inline int page_swapcount(struct page *page)
return 0;
}
+static inline int swp_swapcount(swp_entry_t entry)
+{
+ return 0;
+}
+
#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cedf3d3c3..5c3a5f3e7 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
#ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
/*
* Support for hardware poisoned pages
*/
@@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
{
return swp_type(entry) == SWP_HWPOISON;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return TestSetPageHWPoison(page);
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+ atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_dec(void)
+{
+ atomic_long_dec(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_add(long num)
+{
+ atomic_long_add(num, &num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long num)
+{
+ atomic_long_sub(num, &num_poisoned_pages);
+}
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+ return false;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
#endif
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b8c..a460e2ef2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
asmlinkage long sys_eventfd(unsigned int count);
asmlinkage long sys_eventfd2(unsigned int count, int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_userfaultfd(int flags);
asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
@@ -884,4 +885,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
+asmlinkage long sys_membarrier(int cmd, int flags);
+
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 037e9df2f..157d366e7 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,23 +92,19 @@ struct thermal_zone_device_ops {
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
- int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_temp) (struct thermal_zone_device *, int *);
int (*get_mode) (struct thermal_zone_device *,
enum thermal_device_mode *);
int (*set_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_temp) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long *);
- int (*set_trip_hyst) (struct thermal_zone_device *, int,
- unsigned long);
- int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
- int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
+ int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
+ int (*get_crit_temp) (struct thermal_zone_device *, int *);
+ int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
int (*notify) (struct thermal_zone_device *, int,
@@ -332,9 +328,9 @@ struct thermal_genl_event {
* temperature.
*/
struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, long *);
+ int (*get_temp)(void *, int *);
int (*get_trend)(void *, long *);
- int (*set_emul_temp)(void *, unsigned long);
+ int (*set_emul_temp)(void *, int);
};
/**
@@ -364,7 +360,7 @@ static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline
@@ -384,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
int power_actor_get_max_power(struct thermal_cooling_device *,
struct thermal_zone_device *tz, u32 *max_power);
+int power_actor_get_min_power(struct thermal_cooling_device *,
+ struct thermal_zone_device *tz, u32 *min_power);
int power_actor_set_power(struct thermal_cooling_device *,
struct thermal_instance *, u32);
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -406,7 +404,7 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -419,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 *max_power)
{ return 0; }
+static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 *min_power)
+{ return -ENODEV; }
static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
struct thermal_instance *tz, u32 power)
{ return 0; }
@@ -457,7 +459,7 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
const char *name)
{ return ERR_PTR(-ENODEV); }
static inline int thermal_zone_get_temp(
- struct thermal_zone_device *tz, unsigned long *temp)
+ struct thermal_zone_device *tz, int *temp)
{ return -ENODEV; }
static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
{ return -ENODEV; }
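
Temperatures are now plain int millidegrees Celsius end to end; a sketch of a sensor callback and a consumer against the new prototypes (the 42000 value is just an example):

#include <linux/thermal.h>

/* sensor side: report 42.000 degrees C in millidegrees */
static int demo_get_temp(void *data, int *temp)
{
        *temp = 42000;
        return 0;
}

static const struct thermal_zone_of_device_ops demo_of_ops = {
        .get_temp = demo_get_temp,
};

/* consumer side */
static int demo_read_zone(struct thermal_zone_device *tz)
{
        int temp;
        int ret = thermal_zone_get_temp(tz, &temp);

        return ret ? ret : temp;
}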
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index c78dcfeaf..d4217eff4 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
-extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
diff --git a/include/linux/tick.h b/include/linux/tick.h
index edbfc9a52..e312219ff 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,22 +147,38 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
-extern void __tick_nohz_full_check(void);
+static inline int housekeeping_any_cpu(void)
+{
+ return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+}
+
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
-extern void __tick_nohz_task_switch(struct task_struct *tsk);
+extern void __tick_nohz_task_switch(void);
#else
+static inline int housekeeping_any_cpu(void)
+{
+ return smp_processor_id();
+}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
-static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
-static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
+static inline void __tick_nohz_task_switch(void) { }
#endif
+static inline const struct cpumask *housekeeping_cpumask(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_enabled())
+ return housekeeping_mask;
+#endif
+ return cpu_possible_mask;
+}
+
static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
@@ -181,16 +197,10 @@ static inline void housekeeping_affine(struct task_struct *t)
#endif
}
-static inline void tick_nohz_full_check(void)
-{
- if (tick_nohz_full_enabled())
- __tick_nohz_full_check();
-}
-
-static inline void tick_nohz_task_switch(struct task_struct *tsk)
+static inline void tick_nohz_task_switch(void)
{
if (tick_nohz_full_enabled())
- __tick_nohz_task_switch(tsk);
+ __tick_nohz_task_switch();
}
#endif
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 77b5df2ac..367d5af89 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,11 +12,18 @@ typedef __s64 time64_t;
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
+#define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
+
+struct itimerspec64 {
+ struct timespec64 it_interval;
+ struct timespec64 it_value;
+};
+
#endif
/* Parameters used to convert the timespec values: */
@@ -45,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ts;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ return *its64;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ return *its;
+}
+
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
@@ -77,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
return ret;
}
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+ struct itimerspec ret;
+
+ ret.it_interval = timespec64_to_timespec(its64->it_interval);
+ ret.it_value = timespec64_to_timespec(its64->it_value);
+ return ret;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+ struct itimerspec64 ret;
+
+ ret.it_interval = timespec_to_timespec64(its->it_interval);
+ ret.it_value = timespec_to_timespec64(its->it_value);
+ return ret;
+}
+
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
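
The new conversion helpers mirror the existing timespec ones; both directions are plain value conversions, for example:

#include <linux/time.h>
#include <linux/time64.h>

static void demo_convert(struct itimerspec *legacy)
{
        struct itimerspec64 wide = itimerspec_to_itimerspec64(legacy);
        struct itimerspec back = itimerspec64_to_itimerspec(&wide);

        (void)back; /* no allocation, no side effects */
}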
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 6e191e4e6..ba0ae09cb 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -18,10 +18,17 @@ extern int do_sys_settimeofday(const struct timespec *tv,
* Kernel time accessors
*/
unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
+struct timespec64 current_kernel_time64(void);
/* does not take xtime_lock */
struct timespec __current_kernel_time(void);
+static inline struct timespec current_kernel_time(void)
+{
+ struct timespec64 now = current_kernel_time64();
+
+ return timespec64_to_timespec(now);
+}
+
/*
* timespec based interfaces
*/
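
current_kernel_time() stays source-compatible as a wrapper; new code that wants the full 64-bit seconds range calls the 64-bit variant directly, along the lines of:

#include <linux/printk.h>
#include <linux/timekeeping.h>

static void demo_stamp(void)
{
        struct timespec64 now = current_kernel_time64();

        pr_info("coarse time: %lld.%09ld\n",
                (long long)now.tv_sec, now.tv_nsec);
}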
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 1063c850d..ed27917ca 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -243,6 +243,7 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
+ TRACE_EVENT_FL_UPROBE_BIT,
};
/*
@@ -257,6 +258,7 @@ enum {
* USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
+ * UPROBE - Event is a uprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +269,11 @@ enum {
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
+ TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};
+#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+
struct trace_event_call {
struct list_head list;
struct trace_event_class *class;
@@ -542,7 +547,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
event_triggers_post_call(file, tt);
}
-#ifdef CONFIG_BPF_SYSCALL
+#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ad6c8913a..9bddda029 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -605,7 +605,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
@@ -613,8 +613,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern int tty_audit_push_current(void);
#else
-static inline void tty_audit_add_data(struct tty_struct *tty,
- unsigned char *data, size_t size, unsigned icanon)
+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ size_t size, unsigned icanon)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -709,4 +709,10 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {}
static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
#endif
+#define tty_debug(tty, f, args...) \
+ do { \
+ printk(KERN_DEBUG "%s: %s: " f, __func__, \
+ tty_name(tty), ##args); \
+ } while (0)
+
#endif
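
tty_debug() prepends the calling function and the tty name to an ordinary KERN_DEBUG printk; used roughly like this inside a driver callback:

#include <linux/tty.h>

static void demo_trace_open(struct tty_struct *tty)
{
        tty_debug(tty, "opened, count now %d\n", tty->count);
}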
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 92e337c18..161052477 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -296,7 +296,7 @@ struct tty_operations {
struct tty_driver {
int magic; /* magic number for this structure */
struct kref kref; /* Reference management */
- struct cdev *cdevs;
+ struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
diff --git a/include/linux/types.h b/include/linux/types.h
index 8715287c3..c314989d9 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,6 +212,9 @@ struct callback_head {
};
#define rcu_head callback_head
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
/* clocksource cycle base type */
typedef u64 cycle_t;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c138..d6f2c2c5b 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
#endif /* __LINUX_UACCESS_H__ */
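strncpy_from_unsafe() copies a NUL-terminated string from an address that may fault, which is what the tracing code needs when dereferencing unverified kernel pointers. A minimal sketch, with a hypothetical caller and pointer:

static void dump_unsafe_string(const void *unsafe_addr)
{
	char buf[64];
	long len = strncpy_from_unsafe(buf, unsafe_addr, sizeof(buf));

	if (len > 0)		/* length includes the trailing NUL */
		pr_debug("%s\n", buf);
}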
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 60beb5dc7..0bdc72f36 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,22 @@ struct uprobe_task {
unsigned int depth;
};
+struct return_instance {
+ struct uprobe *uprobe;
+ unsigned long func;
+ unsigned long stack; /* stack pointer */
+ unsigned long orig_ret_vaddr; /* original return address */
+ bool chained; /* true, if instance is nested */
+
+ struct return_instance *next; /* keep as stack */
+};
+
+enum rp_check {
+ RP_CHECK_CALL,
+ RP_CHECK_CHAIN_CALL,
+ RP_CHECK_RET,
+};
+
struct xol_area;
struct uprobes_state {
@@ -128,6 +144,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
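struct return_instance keeps one frame of the uretprobe shadow stack, chained through @next, and arch_uretprobe_is_alive() lets the architecture code decide whether such a frame is still valid for a given check context. A hedged sketch of walking the chain, assuming a hypothetical struct uprobe_task pointer whose return_instances field heads the list:

	struct return_instance *ri;

	for (ri = utask->return_instances; ri; ri = ri->next)
		pr_debug("return to %lx (chained=%d)\n",
			 ri->orig_ret_vaddr, ri->chained);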
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ab94f78c4..a41833cd1 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,8 +19,11 @@ struct ci_hdrc_platform_data {
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
+#define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1)
#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
-#define CI_HDRC_DISABLE_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_HOST_STREAMING BIT(3)
+#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \
+ CI_HDRC_DISABLE_HOST_STREAMING)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
* but otg is not supported (no register otgsc).
@@ -29,12 +32,22 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
#define CI_HDRC_FORCE_FULLSPEED BIT(6)
#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
+#define CI_HDRC_SET_NON_ZERO_TTHA BIT(8)
+#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
+#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
+#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
+ struct usb_otg_caps ci_otg_caps;
bool tpl_support;
+ /* interrupt threshold setting */
+ u32 itc_setting;
+ u32 ahb_burst_config;
+ u32 tx_burst_size;
+ u32 rx_burst_size;
};
/* Default offset of capability registers */
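The new flags split the old CI_HDRC_DISABLE_STREAMING into host and device variants and add per-board overrides backed by the new itc_setting/ahb_burst_config/tx_burst_size/rx_burst_size fields. A hedged sketch of board platform data using them; the values are illustrative only:

static struct ci_hdrc_platform_data ci_pdata = {
	.flags		= CI_HDRC_DISABLE_HOST_STREAMING |
			  CI_HDRC_OVERRIDE_AHB_BURST,
	.ahb_burst_config = 0x0,	/* illustrative */
	.itc_setting	= 1,		/* illustrative */
};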
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2511469a9..1074b8921 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -228,6 +228,8 @@ struct usb_function {
struct list_head list;
DECLARE_BITMAP(endpoints, 32);
const struct usb_function_instance *fi;
+
+ unsigned int bind_deactivated:1;
};
int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f3dfb7d0..c14a69b36 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@ struct usb_ep_ops {
};
/**
+ * struct usb_ep_caps - endpoint capabilities description
+ * @type_control:Endpoint supports control type (reserved for ep0).
+ * @type_iso:Endpoint supports isochronous transfers.
+ * @type_bulk:Endpoint supports bulk transfers.
+ * @type_int:Endpoint supports interrupt transfers.
+ * @dir_in:Endpoint supports IN direction.
+ * @dir_out:Endpoint supports OUT direction.
+ */
+struct usb_ep_caps {
+ unsigned type_control:1;
+ unsigned type_iso:1;
+ unsigned type_bulk:1;
+ unsigned type_int:1;
+ unsigned dir_in:1;
+ unsigned dir_out:1;
+};
+
+#define USB_EP_CAPS_TYPE_CONTROL 0x01
+#define USB_EP_CAPS_TYPE_ISO 0x02
+#define USB_EP_CAPS_TYPE_BULK 0x04
+#define USB_EP_CAPS_TYPE_INT 0x08
+#define USB_EP_CAPS_TYPE_ALL \
+ (USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+#define USB_EP_CAPS_DIR_IN 0x01
+#define USB_EP_CAPS_DIR_OUT 0x02
+#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
+
+#define USB_EP_CAPS(_type, _dir) \
+ { \
+ .type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
+ .type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
+ .type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
+ .type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
+ .dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
+ .dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
+ }
+
+/**
* struct usb_ep - device side representation of USB endpoint
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
+ * @caps:The structure describing types and directions supported by endpoint.

* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
* the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,15 @@ struct usb_ep_ops {
* gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
* and is accessed only in response to a driver setup() callback.
*/
+
struct usb_ep {
void *driver_data;
const char *name;
const struct usb_ep_ops *ops;
struct list_head ep_list;
+ struct usb_ep_caps caps;
+ bool claimed;
unsigned maxpacket:16;
unsigned maxpacket_limit:16;
unsigned max_streams:16;
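The usb_ep_caps bitfields and the USB_EP_CAPS() initializer added above let a UDC driver declare what each endpoint can do, so the core can match endpoints to descriptors. A minimal sketch of how a hypothetical driver might describe a bulk/interrupt-capable endpoint:

static struct usb_ep my_ep = {
	.name = "ep1-bulk",
	.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT,
			    USB_EP_CAPS_DIR_ALL),
};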
@@ -492,6 +534,9 @@ struct usb_gadget_ops {
int (*udc_start)(struct usb_gadget *,
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
+ struct usb_ep *(*match_ep)(struct usb_gadget *,
+ struct usb_endpoint_descriptor *,
+ struct usb_ss_ep_comp_descriptor *);
};
/**
@@ -511,6 +556,7 @@ struct usb_gadget_ops {
* @dev: Driver model state for this abstract device.
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
+ * @otg_caps: OTG capabilities of this gadget.
* @sg_supported: true if we can handle scatter-gather
* @is_otg: True if the USB device port uses a Mini-AB jack, so that the
* gadget driver must provide a USB OTG descriptor.
@@ -526,6 +572,9 @@ struct usb_gadget_ops {
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
* @is_selfpowered: if the gadget is self-powered.
+ * @deactivated: True if gadget is deactivated - in deactivated state it cannot
+ * be connected.
+ * @connected: True if gadget is connected.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -559,6 +608,7 @@ struct usb_gadget {
struct device dev;
unsigned out_epnum;
unsigned in_epnum;
+ struct usb_otg_caps *otg_caps;
unsigned sg_supported:1;
unsigned is_otg:1;
@@ -567,7 +617,12 @@ struct usb_gadget {
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
unsigned quirk_ep_out_aligned_size:1;
+ unsigned quirk_altset_not_supp:1;
+ unsigned quirk_stall_not_supp:1;
+ unsigned quirk_zlp_not_supp:1;
unsigned is_selfpowered:1;
+ unsigned deactivated:1;
+ unsigned connected:1;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -584,7 +639,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
#define gadget_for_each_ep(tmp, gadget) \
list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
-
/**
* usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
* requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +657,34 @@ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
}
/**
+ * gadget_is_altset_supported - return true iff the hardware supports
+ * altsettings
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_altset_supported(struct usb_gadget *g)
+{
+ return !g->quirk_altset_not_supp;
+}
+
+/**
+ * gadget_is_stall_supported - return true iff the hardware supports stalling
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_stall_supported(struct usb_gadget *g)
+{
+ return !g->quirk_stall_not_supp;
+}
+
+/**
+ * gadget_is_zlp_supported - return true iff the hardware supports zlp
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_zlp_supported(struct usb_gadget *g)
+{
+ return !g->quirk_zlp_not_supp;
+}
+
+/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
* @g: controller that might support both high and full speeds
*/
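The gadget_is_*_supported() helpers added in the hunk above simply invert the corresponding quirk bits, so function drivers can test capabilities instead of open-coding flag checks. A minimal sketch (gadget pointer hypothetical):

	if (!gadget_is_altset_supported(gadget))
		return -EOPNOTSUPP;	/* hardware cannot switch altsettings */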
@@ -771,9 +853,24 @@ static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
*/
static inline int usb_gadget_connect(struct usb_gadget *gadget)
{
+ int ret;
+
if (!gadget->ops->pullup)
return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 1);
+
+ if (gadget->deactivated) {
+ /*
+ * If the gadget is deactivated we only save the new state.
+ * The gadget will be connected automatically after activation.
+ */
+ gadget->connected = true;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+ gadget->connected = 1;
+ return ret;
}
/**
@@ -784,20 +881,88 @@ static inline int usb_gadget_connect(struct usb_gadget *gadget)
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ if (!gadget->ops->pullup)
+ return -EOPNOTSUPP;
+
+ if (gadget->deactivated) {
+ /*
+ * If the gadget is deactivated we only save the new state.
+ * The gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ return 0;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+ gadget->connected = 0;
+ return ret;
+}
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
* This routine may be used during the gadget driver bind() call to prevent
* the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_connect() is called. For example, user mode components may
+ * usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
* Returns zero on success, else negative errno.
*/
-static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
{
- if (!gadget->ops->pullup)
- return -EOPNOTSUPP;
- return gadget->ops->pullup(gadget, 0);
+ int ret;
+
+ if (gadget->deactivated)
+ return 0;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect(gadget);
+ if (ret)
+ return ret;
+ /*
+ * If the gadget was connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+ return 0;
}
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates a gadget which was previously deactivated with a
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ if (!gadget->deactivated)
+ return 0;
+
+ gadget->deactivated = false;
+
+ /*
+ * If the gadget was connected before deactivation, or became connected
+ * while it was deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ return usb_gadget_connect(gadget);
+
+ return 0;
+}
/*-------------------------------------------------------------------------*/
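Taken together, usb_gadget_deactivate()/usb_gadget_activate() let a function driver hide the gadget from the host while it is not ready, with connect/disconnect requests made in the meantime only recorded in ->connected. A hedged sketch of the pairing in a hypothetical function driver:

	ret = usb_gadget_deactivate(gadget);	/* stay invisible during setup */
	if (ret)
		return ret;

	/* ... wait for the userspace component to come up ... */

	ret = usb_gadget_activate(gadget);	/* reconnects if ->connected */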
@@ -1002,6 +1167,10 @@ int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **ss);
void usb_free_all_descriptors(struct usb_function *f);
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+ struct usb_gadget *gadget);
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+ struct usb_descriptor_header *otg_desc);
/*-------------------------------------------------------------------------*/
/* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1203,21 @@ extern void usb_gadget_giveback_request(struct usb_ep *ep,
/*-------------------------------------------------------------------------*/
+/* utility to find endpoint by name */
+
+extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
+ const char *name);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to check if endpoint caps match descriptor needs */
+
+extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp);
+
+/*-------------------------------------------------------------------------*/
+
/* utility to update vbus status for udc core, it may be scheduled */
extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c9aa7792d..d2784c10b 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -564,9 +564,9 @@ extern void usb_ep0_reinit(struct usb_device *);
/*-------------------------------------------------------------------------*/
-/* class requests from USB 3.0 hub spec, table 10-5 */
-#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
-#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
+/* class requests from USB 3.1 hub spec, table 10-7 */
+#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
+#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
/*
* Generic bandwidth allocation constants/support
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index e55a15042..8c8f6854c 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -128,7 +128,7 @@ struct msm_otg_platform_data {
*/
struct msm_usb_cable {
struct notifier_block nb;
- struct extcon_specific_cable_nb conn;
+ struct extcon_dev *extcon;
};
/**
@@ -155,6 +155,10 @@ struct msm_usb_cable {
* starting controller using usbcmd run/stop bit.
* @vbus: VBUS signal state tracking, using extcon framework
* @id: ID signal state tracking, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
*/
struct msm_otg {
struct usb_phy phy;
@@ -188,6 +192,9 @@ struct msm_otg {
struct msm_usb_cable vbus;
struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
};
#endif
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index cfe0528cd..8c5a818ec 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -15,6 +15,8 @@
enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
bool of_usb_host_tpl_support(struct device_node *np);
+int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps);
#else
static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
{
@@ -30,6 +32,11 @@ static inline bool of_usb_host_tpl_support(struct device_node *np)
{
return false;
}
+static inline int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 52661c5da..bd1dcf816 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,21 @@ struct usb_otg {
};
+/**
+ * struct usb_otg_caps - describes the otg capabilities of the device
+ * @otg_rev: The OTG revision number the device is compliant with, it's
+ * in binary-coded decimal (i.e. 2.0 is 0200H).
+ * @hnp_support: Indicates if the device supports HNP.
+ * @srp_support: Indicates if the device supports SRP.
+ * @adp_support: Indicates if the device supports ADP.
+ */
+struct usb_otg_caps {
+ u16 otg_rev;
+ bool hnp_support;
+ bool srp_support;
+ bool adp_support;
+};
+
extern const char *usb_otg_state_string(enum usb_otg_state state);
/* Context: can sleep */
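struct usb_otg_caps lets a controller advertise its OTG revision (in BCD) and which of SRP/HNP/ADP it implements. A hedged example with illustrative values:

static struct usb_otg_caps my_otg_caps = {
	.otg_rev	= 0x0200,	/* OTG 2.0 in BCD */
	.srp_support	= true,
	.hnp_support	= true,
	.adp_support	= false,
};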
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 3dd5a781d..bfb74723f 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
*/
int pio_dma_border; /* default is 64byte */
- u32 type;
+ uintptr_t type;
u32 enable_gpio;
/*
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 000000000..587480ad4
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ * include/linux/userfaultfd_k.h
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+ unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+ unsigned long dst_start,
+ unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+ unsigned long address,
+ unsigned int flags,
+ unsigned long reason)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
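The helpers above are what the memory-management fault paths use: when a VMA is armed with VM_UFFD_MISSING, the fault is handed to userspace instead of being filled by the kernel. A minimal sketch of that pattern, with hypothetical vma/address/flags variables:

	if (userfaultfd_missing(vma))
		return handle_userfault(vma, address, flags, VM_UFFD_MISSING);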
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
index ac3481921..da2049b51 100644
--- a/include/linux/verify_pefile.h
+++ b/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
#ifndef _LINUX_VERIFY_PEFILE_H
#define _LINUX_VERIFY_PEFILE_H
+#include <crypto/public_key.h>
+
extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
- struct key *trusted_keyring, bool *_trusted);
+ struct key *trusted_keyring,
+ enum key_being_used_for usage,
+ bool *_trusted);
#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47feada5..d74a0e907 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 738b30b39..0197358f1 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -265,7 +265,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
/**
* delayed_work_pending - Find out whether a delayable work item is currently
* pending
- * @work: The work item in question
+ * @w: The work item in question
*/
#define delayed_work_pending(w) \
work_pending(&(w)->work)
@@ -366,7 +366,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
* information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args: args for @fmt
+ * @args...: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
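A minimal sketch of allocating an ordered workqueue as documented above; the name and flags are illustrative:

	struct workqueue_struct *wq;

	wq = alloc_ordered_workqueue("my-ordered-wq", WQ_FREEZABLE);
	if (!wq)
		return -ENOMEM;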
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index f9d41a6e3..e183a0a65 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -9,7 +9,7 @@ struct zbud_ops {
int (*evict)(struct zbud_pool *pool, unsigned long handle);
};
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
void zbud_destroy_pool(struct zbud_pool *pool);
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index d30eff3d8..42f8ec992 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,8 +36,10 @@ enum zpool_mapmode {
ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};
+bool zpool_has_pool(char *type);
+
struct zpool *zpool_create_pool(char *type, char *name,
- gfp_t gfp, struct zpool_ops *ops);
+ gfp_t gfp, const struct zpool_ops *ops);
char *zpool_get_type(struct zpool *pool);
@@ -81,7 +83,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
+ void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
struct zpool *zpool);
void (*destroy)(void *pool);
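With the ops structures now taken as const, an implementation can keep its callback table in read-only data. A hedged sketch of an evict callback and const ops table matching the prototypes above (names hypothetical, evict body illustrative):

static int my_evict(struct zpool *pool, unsigned long handle)
{
	return -EINVAL;			/* illustrative no-op */
}

static const struct zpool_ops my_zpool_ops = {
	.evict = my_evict,
};

/* later: zpool_create_pool("zbud", "mypool", GFP_KERNEL, &my_zpool_ops); */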
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 1338190b5..6398dfae5 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -34,6 +34,11 @@ enum zs_mapmode {
*/
};
+struct zs_pool_stats {
+ /* How many pages were migrated (freed) */
+ unsigned long pages_compacted;
+};
+
struct zs_pool;
struct zs_pool *zs_create_pool(char *name, gfp_t flags);
@@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
#endif
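zs_pool_stats() exposes how many pages zs_compact() has freed so far. A minimal usage sketch (pool pointer hypothetical):

	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);
	pr_info("pages compacted: %lu\n", stats.pages_compacted);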
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 0dc7060f9..17ddae320 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -53,9 +53,13 @@ struct media_file_operations {
/**
* struct media_devnode - Media device node
+ * @fops: pointer to struct media_file_operations with media device ops
+ * @dev: struct device pointer for the media controller device
+ * @cdev: struct cdev pointer for the character device
* @parent: parent device
* @minor: device node minor number
* @flags: flags, combination of the MEDIA_FLAG_* constants
+ * @release: release callback called at the end of media_devnode_release()
*
* This structure represents a media-related device node.
*
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
deleted file mode 100644
index 048f8f911..000000000
--- a/include/media/omap3isp.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * omap3isp.h
- *
- * TI OMAP3 ISP - Platform data
- *
- * Copyright (C) 2011 Nokia Corporation
- *
- * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- * Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __MEDIA_OMAP3ISP_H__
-#define __MEDIA_OMAP3ISP_H__
-
-struct i2c_board_info;
-struct isp_device;
-
-enum isp_interface_type {
- ISP_INTERFACE_PARALLEL,
- ISP_INTERFACE_CSI2A_PHY2,
- ISP_INTERFACE_CCP2B_PHY1,
- ISP_INTERFACE_CCP2B_PHY2,
- ISP_INTERFACE_CSI2C_PHY1,
-};
-
-enum {
- ISP_LANE_SHIFT_0 = 0,
- ISP_LANE_SHIFT_2 = 1,
- ISP_LANE_SHIFT_4 = 2,
- ISP_LANE_SHIFT_6 = 3,
-};
-
-/**
- * struct isp_parallel_cfg - Parallel interface configuration
- * @data_lane_shift: Data lane shifter
- * ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
- * ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
- * ISP_LANE_SHIFT_4 - CAMEXT[13:4] -> CAM[9:0]
- * ISP_LANE_SHIFT_6 - CAMEXT[13:6] -> CAM[7:0]
- * @clk_pol: Pixel clock polarity
- * 0 - Sample on rising edge, 1 - Sample on falling edge
- * @hs_pol: Horizontal synchronization polarity
- * 0 - Active high, 1 - Active low
- * @vs_pol: Vertical synchronization polarity
- * 0 - Active high, 1 - Active low
- * @fld_pol: Field signal polarity
- * 0 - Positive, 1 - Negative
- * @data_pol: Data polarity
- * 0 - Normal, 1 - One's complement
- */
-struct isp_parallel_cfg {
- unsigned int data_lane_shift:2;
- unsigned int clk_pol:1;
- unsigned int hs_pol:1;
- unsigned int vs_pol:1;
- unsigned int fld_pol:1;
- unsigned int data_pol:1;
-};
-
-enum {
- ISP_CCP2_PHY_DATA_CLOCK = 0,
- ISP_CCP2_PHY_DATA_STROBE = 1,
-};
-
-enum {
- ISP_CCP2_MODE_MIPI = 0,
- ISP_CCP2_MODE_CCP2 = 1,
-};
-
-/**
- * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct isp_csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISP_CSIPHY1_NUM_DATA_LANES 1
-#define ISP_CSIPHY2_NUM_DATA_LANES 2
-
-/**
- * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct isp_csiphy_lanes_cfg {
- struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
- struct isp_csiphy_lane clk;
-};
-
-/**
- * struct isp_ccp2_cfg - CCP2 interface configuration
- * @strobe_clk_pol: Strobe/clock polarity
- * 0 - Non Inverted, 1 - Inverted
- * @crc: Enable the cyclic redundancy check
- * @ccp2_mode: Enable CCP2 compatibility mode
- * ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
- * ISP_CCP2_MODE_CCP2 - CCP2 mode
- * @phy_layer: Physical layer selection
- * ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
- * ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
- * @vpclk_div: Video port output clock control
- */
-struct isp_ccp2_cfg {
- unsigned int strobe_clk_pol:1;
- unsigned int crc:1;
- unsigned int ccp2_mode:1;
- unsigned int phy_layer:1;
- unsigned int vpclk_div:2;
- struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-/**
- * struct isp_csi2_cfg - CSI2 interface configuration
- * @crc: Enable the cyclic redundancy check
- */
-struct isp_csi2_cfg {
- unsigned crc:1;
- struct isp_csiphy_lanes_cfg lanecfg;
-};
-
-struct isp_bus_cfg {
- enum isp_interface_type interface;
- union {
- struct isp_parallel_cfg parallel;
- struct isp_ccp2_cfg ccp2;
- struct isp_csi2_cfg csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct isp_platform_subdev {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
- struct isp_bus_cfg *bus;
-};
-
-struct isp_platform_data {
- struct isp_platform_subdev *subdevs;
- void (*set_constraints)(struct isp_device *isp, bool enable);
-};
-
-#endif /* __MEDIA_OMAP3ISP_H__ */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 644bdc61c..ec921f653 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -69,7 +69,7 @@ enum rc_filter_type {
* @rc_map: current scan/key table
* @lock: used to ensure we've filled in all protocol details before
* anyone can call show_protocols or store_protocols
- * @devno: unique remote control device number
+ * @minor: unique minor remote control device number
* @raw: additional data for raw pulse/space devices
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
@@ -110,7 +110,7 @@ enum rc_filter_type {
* @s_tx_mask: set transmitter mask (for devices with multiple tx outputs)
* @s_tx_carrier: set transmit carrier frequency
* @s_tx_duty_cycle: set transmit duty cycle (0% - 100%)
- * @s_rx_carrier: inform driver about carrier it is expected to handle
+ * @s_rx_carrier_range: inform driver about carrier it is expected to handle
* @tx_ir: transmit IR
* @s_idle: enable/disable hardware idle mode, upon which,
* device doesn't interrupt host until it sees IR pulses
@@ -129,7 +129,7 @@ struct rc_dev {
const char *map_name;
struct rc_map rc_map;
struct mutex lock;
- unsigned long devno;
+ unsigned int minor;
struct ir_raw_event_ctrl *raw;
struct input_dev *input_dev;
enum rc_driver_type driver_type;
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 27763d5bd..7c4bbc4df 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -14,30 +14,28 @@
enum rc_type {
RC_TYPE_UNKNOWN = 0, /* Protocol not known */
RC_TYPE_OTHER = 1, /* Protocol known but proprietary */
- RC_TYPE_LIRC = 2, /* Pass raw IR to lirc userspace */
- RC_TYPE_RC5 = 3, /* Philips RC5 protocol */
- RC_TYPE_RC5X = 4, /* Philips RC5x protocol */
- RC_TYPE_RC5_SZ = 5, /* StreamZap variant of RC5 */
- RC_TYPE_JVC = 6, /* JVC protocol */
- RC_TYPE_SONY12 = 7, /* Sony 12 bit protocol */
- RC_TYPE_SONY15 = 8, /* Sony 15 bit protocol */
- RC_TYPE_SONY20 = 9, /* Sony 20 bit protocol */
- RC_TYPE_NEC = 10, /* NEC protocol */
- RC_TYPE_SANYO = 11, /* Sanyo protocol */
- RC_TYPE_MCE_KBD = 12, /* RC6-ish MCE keyboard/mouse */
- RC_TYPE_RC6_0 = 13, /* Philips RC6-0-16 protocol */
- RC_TYPE_RC6_6A_20 = 14, /* Philips RC6-6A-20 protocol */
- RC_TYPE_RC6_6A_24 = 15, /* Philips RC6-6A-24 protocol */
- RC_TYPE_RC6_6A_32 = 16, /* Philips RC6-6A-32 protocol */
- RC_TYPE_RC6_MCE = 17, /* MCE (Philips RC6-6A-32 subtype) protocol */
- RC_TYPE_SHARP = 18, /* Sharp protocol */
- RC_TYPE_XMP = 19, /* XMP protocol */
+ RC_TYPE_RC5 = 2, /* Philips RC5 protocol */
+ RC_TYPE_RC5X = 3, /* Philips RC5x protocol */
+ RC_TYPE_RC5_SZ = 4, /* StreamZap variant of RC5 */
+ RC_TYPE_JVC = 5, /* JVC protocol */
+ RC_TYPE_SONY12 = 6, /* Sony 12 bit protocol */
+ RC_TYPE_SONY15 = 7, /* Sony 15 bit protocol */
+ RC_TYPE_SONY20 = 8, /* Sony 20 bit protocol */
+ RC_TYPE_NEC = 9, /* NEC protocol */
+ RC_TYPE_SANYO = 10, /* Sanyo protocol */
+ RC_TYPE_MCE_KBD = 11, /* RC6-ish MCE keyboard/mouse */
+ RC_TYPE_RC6_0 = 12, /* Philips RC6-0-16 protocol */
+ RC_TYPE_RC6_6A_20 = 13, /* Philips RC6-6A-20 protocol */
+ RC_TYPE_RC6_6A_24 = 14, /* Philips RC6-6A-24 protocol */
+ RC_TYPE_RC6_6A_32 = 15, /* Philips RC6-6A-32 protocol */
+ RC_TYPE_RC6_MCE = 16, /* MCE (Philips RC6-6A-32 subtype) protocol */
+ RC_TYPE_SHARP = 17, /* Sharp protocol */
+ RC_TYPE_XMP = 18, /* XMP protocol */
};
#define RC_BIT_NONE 0
#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN)
#define RC_BIT_OTHER (1 << RC_TYPE_OTHER)
-#define RC_BIT_LIRC (1 << RC_TYPE_LIRC)
#define RC_BIT_RC5 (1 << RC_TYPE_RC5)
#define RC_BIT_RC5X (1 << RC_TYPE_RC5X)
#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ)
@@ -56,7 +54,7 @@ enum rc_type {
#define RC_BIT_SHARP (1 << RC_TYPE_SHARP)
#define RC_BIT_XMP (1 << RC_TYPE_XMP)
-#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \
+#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \
RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
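With RC_TYPE_LIRC gone, every remaining protocol bit is still (1 << RC_TYPE_*) of its renumbered enum value, so driver code built on the RC_BIT_* masks keeps working. A hedged sketch of restricting a receiver to two protocols, assuming a struct rc_dev pointer with the usual allowed_protocols mask:

	dev->allowed_protocols = RC_BIT_NEC | RC_BIT_RC5;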
diff --git a/include/media/tc358743.h b/include/media/tc358743.h
new file mode 100644
index 000000000..4513f2f9c
--- /dev/null
+++ b/include/media/tc358743.h
@@ -0,0 +1,131 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
+ */
+
+#ifndef _TC358743_
+#define _TC358743_
+
+enum tc358743_ddc5v_delays {
+ DDC5V_DELAY_0_MS,
+ DDC5V_DELAY_50_MS,
+ DDC5V_DELAY_100_MS,
+ DDC5V_DELAY_200_MS,
+};
+
+enum tc358743_hdmi_detection_delay {
+ HDMI_MODE_DELAY_0_MS,
+ HDMI_MODE_DELAY_25_MS,
+ HDMI_MODE_DELAY_50_MS,
+ HDMI_MODE_DELAY_100_MS,
+};
+
+struct tc358743_platform_data {
+ /* System clock connected to REFCLK (pin H5) */
+ u32 refclk_hz; /* 26 MHz, 27 MHz or 42 MHz */
+
+ /* DDC +5V debounce delay to avoid spurious interrupts when the cable
+ * is connected.
+ * Sets DDC5V_MODE in register DDC_CTL.
+ * Default: DDC5V_DELAY_0_MS
+ */
+ enum tc358743_ddc5v_delays ddc5v_delay;
+
+ bool enable_hdcp;
+
+ /*
+ * The FIFO size is 512x32, so Toshiba recommends setting the default
+ * FIFO level to somewhere in the middle (e.g. 300), so it can cover
+ * speed mismatches in input and output ports.
+ */
+ u16 fifo_level;
+
+ /* Bps per lane is (refclk_hz / pll_prd) * pll_fbd */
+ u16 pll_prd;
+ u16 pll_fbd;
+
+ /* CSI
+ * Calculate CSI parameters with REF_02 for the highest resolution your
+ * CSI interface can handle. The driver will adjust the number of CSI
+ * lanes in use according to the pixel clock.
+ *
+ * The values in brackets are calculated with REF_02 for a rate of
+ * 823.5 Mbps per lane, and can serve as a starting point.
+ */
+ u32 lineinitcnt; /* (0x00001770) */
+ u32 lptxtimecnt; /* (0x00000005) */
+ u32 tclk_headercnt; /* (0x00001d04) */
+ u32 tclk_trailcnt; /* (0x00000000) */
+ u32 ths_headercnt; /* (0x00000505) */
+ u32 twakeup; /* (0x00004650) */
+ u32 tclk_postcnt; /* (0x00000000) */
+ u32 ths_trailcnt; /* (0x00000004) */
+ u32 hstxvregcnt; /* (0x00000005) */
+
+ /* DVI->HDMI detection delay to avoid unnecessary switching between DVI
+ * and HDMI mode.
+ * Sets HDMI_DET_V in register HDMI_DET.
+ * Default: HDMI_MODE_DELAY_0_MS
+ */
+ enum tc358743_hdmi_detection_delay hdmi_detection_delay;
+
+ /* Reset PHY automatically when TMDS clock goes from DC to AC.
+ * Sets PHY_AUTO_RST2 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_detected;
+
+ /* Reset PHY automatically when TMDS clock passes 21 MHz.
+ * Sets PHY_AUTO_RST3 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_in_range;
+
+ /* Reset PHY automatically when TMDS clock is detected.
+ * Sets PHY_AUTO_RST4 in register PHY_CTL2.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_tmds_valid;
+
+ /* Reset HDMI PHY automatically when hsync period is out of range.
+ * Sets H_PI_RST in register HV_RST.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_hsync_out_of_range;
+
+ /* Reset HDMI PHY automatically when vsync period is out of range.
+ * Sets V_PI_RST in register HV_RST.
+ * Default: false
+ */
+ bool hdmi_phy_auto_reset_vsync_out_of_range;
+};
+
+/* custom controls */
+/* Audio sample rate in Hz */
+#define TC358743_CID_AUDIO_SAMPLING_RATE (V4L2_CID_USER_TC358743_BASE + 0)
+/* Audio present status */
+#define TC358743_CID_AUDIO_PRESENT (V4L2_CID_USER_TC358743_BASE + 1)
+
+#endif
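A hedged worked example of the PLL comment in the platform data above: with a 27 MHz REFCLK, pll_prd = 4 and pll_fbd = 122 give (27000000 / 4) * 122 = 823.5 Mbps per lane, the rate the bracketed CSI register values were calculated for. The values below are illustrative, not board-validated:

static struct tc358743_platform_data tc358743_pdata = {
	.refclk_hz	= 27000000,
	.ddc5v_delay	= DDC5V_DELAY_100_MS,
	.pll_prd	= 4,
	.pll_fbd	= 122,
};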
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 768356917..1d6d7da4c 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -32,7 +32,8 @@ enum v4l2_async_match_type {
/**
* struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- * @bus_type: subdevice bus type to select the appropriate matching method
+ *
+ * @match_type: type of match that will be used
* @match: union of per-bus type matching data sets
* @list: used to link struct v4l2_async_subdev objects, waiting to be
* probed, to a notifier->waiting list
@@ -62,8 +63,9 @@ struct v4l2_async_subdev {
};
/**
- * v4l2_async_notifier - v4l2_device notifier data
- * @num_subdevs:number of subdevices
+ * struct v4l2_async_notifier - v4l2_device notifier data
+ *
+ * @num_subdevs: number of subdevices
* @subdevs: array of pointers to subdevice descriptors
* @v4l2_dev: pointer to struct v4l2_device
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 911f3e542..da6fe9802 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -36,7 +36,8 @@ struct v4l2_subscribed_event;
struct v4l2_fh;
struct poll_table_struct;
-/** union v4l2_ctrl_ptr - A pointer to a control value.
+/**
+ * union v4l2_ctrl_ptr - A pointer to a control value.
* @p_s32: Pointer to a 32-bit signed value.
* @p_s64: Pointer to a 64-bit signed value.
* @p_u8: Pointer to an 8-bit unsigned value.
@@ -55,30 +56,34 @@ union v4l2_ctrl_ptr {
void *p;
};
-/** struct v4l2_ctrl_ops - The control operations that the driver has to provide.
- * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
- * for volatile (and usually read-only) controls such as a control
- * that returns the current signal strength which changes
- * continuously.
- * If not set, then the currently cached value will be returned.
- * @try_ctrl: Test whether the control's value is valid. Only relevant when
- * the usual min/max/step checks are not sufficient.
- * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The
- * ctrl->handler->lock is held when these ops are called, so no
- * one else can access controls owned by that handler.
- */
+/**
+ * struct v4l2_ctrl_ops - The control operations that the driver has to provide.
+ * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
+ * for volatile (and usually read-only) controls such as a control
+ * that returns the current signal strength which changes
+ * continuously.
+ * If not set, then the currently cached value will be returned.
+ * @try_ctrl: Test whether the control's value is valid. Only relevant when
+ * the usual min/max/step checks are not sufficient.
+ * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The
+ * ctrl->handler->lock is held when these ops are called, so no
+ * one else can access controls owned by that handler.
+ */
struct v4l2_ctrl_ops {
int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl);
int (*try_ctrl)(struct v4l2_ctrl *ctrl);
int (*s_ctrl)(struct v4l2_ctrl *ctrl);
};
-/** struct v4l2_ctrl_type_ops - The control type operations that the driver has to provide.
- * @equal: return true if both values are equal.
- * @init: initialize the value.
- * @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value otherwise.
- */
+/**
+ * struct v4l2_ctrl_type_ops - The control type operations that the driver
+ * has to provide.
+ *
+ * @equal: return true if both values are equal.
+ * @init: initialize the value.
+ * @log: log the value.
+ * @validate: validate the value. Return 0 on success and a negative value otherwise.
+ */
struct v4l2_ctrl_type_ops {
bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr1,
@@ -92,74 +97,80 @@ struct v4l2_ctrl_type_ops {
typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
-/** struct v4l2_ctrl - The control structure.
- * @node: The list node.
- * @ev_subs: The list of control event subscriptions.
- * @handler: The handler that owns the control.
- * @cluster: Point to start of cluster array.
- * @ncontrols: Number of controls in cluster array.
- * @done: Internal flag: set for each processed control.
- * @is_new: Set when the user specified a new value for this control. It
- * is also set when called from v4l2_ctrl_handler_setup. Drivers
- * should never set this flag.
- * @has_changed: Set when the current value differs from the new value. Drivers
- * should never use this flag.
- * @is_private: If set, then this control is private to its handler and it
- * will not be added to any other handlers. Drivers can set
- * this flag.
- * @is_auto: If set, then this control selects whether the other cluster
- * members are in 'automatic' mode or 'manual' mode. This is
- * used for autogain/gain type clusters. Drivers should never
- * set this flag directly.
- * @is_int: If set, then this control has a simple integer value (i.e. it
- * uses ctrl->val).
- * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
- * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
- * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
- * v4l2_ext_control uses field p to point to the data.
- * @is_array: If set, then this control contains an N-dimensional array.
- * @has_volatiles: If set, then one or more members of the cluster are volatile.
- * Drivers should never touch this flag.
- * @call_notify: If set, then call the handler's notify function whenever the
- * control's value changes.
- * @manual_mode_value: If the is_auto flag is set, then this is the value
- * of the auto control that determines if that control is in
- * manual mode. So if the value of the auto control equals this
- * value, then the whole cluster is in manual mode. Drivers should
- * never set this flag directly.
- * @ops: The control ops.
- * @type_ops: The control type ops.
- * @id: The control ID.
- * @name: The control name.
- * @type: The control type.
- * @minimum: The control's minimum value.
- * @maximum: The control's maximum value.
- * @default_value: The control's default value.
- * @step: The control's step value for non-menu controls.
- * @elems: The number of elements in the N-dimensional array.
- * @elem_size: The size in bytes of the control.
- * @dims: The size of each dimension.
- * @nr_of_dims:The number of dimensions in @dims.
- * @menu_skip_mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 32 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a u64 or a bit array.
- * @qmenu: A const char * array for all menu items. Array entries that are
- * empty strings ("") correspond to non-existing menu items (this
- * is in addition to the menu_skip_mask above). The last entry
- * must be NULL.
- * @flags: The control's flags.
- * @cur: The control's current value.
- * @val: The control's new s32 value.
- * @val64: The control's new s64 value.
- * @priv: The control's private pointer. For use by the driver. It is
- * untouched by the control framework. Note that this pointer is
- * not freed when the control is deleted. Should this be needed
- * then a new internal bitfield can be added to tell the framework
- * to free this pointer.
- */
+/**
+ * struct v4l2_ctrl - The control structure.
+ * @node: The list node.
+ * @ev_subs: The list of control event subscriptions.
+ * @handler: The handler that owns the control.
+ * @cluster: Point to start of cluster array.
+ * @ncontrols: Number of controls in cluster array.
+ * @done: Internal flag: set for each processed control.
+ * @is_new: Set when the user specified a new value for this control. It
+ * is also set when called from v4l2_ctrl_handler_setup. Drivers
+ * should never set this flag.
+ * @has_changed: Set when the current value differs from the new value. Drivers
+ * should never use this flag.
+ * @is_private: If set, then this control is private to its handler and it
+ * will not be added to any other handlers. Drivers can set
+ * this flag.
+ * @is_auto: If set, then this control selects whether the other cluster
+ * members are in 'automatic' mode or 'manual' mode. This is
+ * used for autogain/gain type clusters. Drivers should never
+ * set this flag directly.
+ * @is_int: If set, then this control has a simple integer value (i.e. it
+ * uses ctrl->val).
+ * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
+ * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
+ * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
+ * v4l2_ext_control uses field p to point to the data.
+ * @is_array: If set, then this control contains an N-dimensional array.
+ * @has_volatiles: If set, then one or more members of the cluster are volatile.
+ * Drivers should never touch this flag.
+ * @call_notify: If set, then call the handler's notify function whenever the
+ * control's value changes.
+ * @manual_mode_value: If the is_auto flag is set, then this is the value
+ * of the auto control that determines if that control is in
+ * manual mode. So if the value of the auto control equals this
+ * value, then the whole cluster is in manual mode. Drivers should
+ * never set this flag directly.
+ * @ops: The control ops.
+ * @type_ops: The control type ops.
+ * @id: The control ID.
+ * @name: The control name.
+ * @type: The control type.
+ * @minimum: The control's minimum value.
+ * @maximum: The control's maximum value.
+ * @default_value: The control's default value.
+ * @step: The control's step value for non-menu controls.
+ * @elems: The number of elements in the N-dimensional array.
+ * @elem_size: The size in bytes of the control.
+ * @dims: The size of each dimension.
+ * @nr_of_dims:The number of dimensions in @dims.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 32 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a u64 or a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * @flags: The control's flags.
+ * @cur: The control's current value.
+ * @val: The control's new s32 value.
+ * @priv: The control's private pointer. For use by the driver. It is
+ * untouched by the control framework. Note that this pointer is
+ * not freed when the control is deleted. Should this be needed
+ * then a new internal bitfield can be added to tell the framework
+ * to free this pointer.
+ * @p_cur: The control's current value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer.
+ * @p_new: The control's new value represented via a union which provides
+ * a standard way of accessing control types
+ * through a pointer.
+ */
struct v4l2_ctrl {
/* Administrative fields */
struct list_head node;
@@ -210,16 +221,17 @@ struct v4l2_ctrl {
union v4l2_ctrl_ptr p_cur;
};
-/** struct v4l2_ctrl_ref - The control reference.
- * @node: List node for the sorted list.
- * @next: Single-link list node for the hash.
- * @ctrl: The actual control information.
- * @helper: Pointer to helper struct. Used internally in prepare_ext_ctrls().
- *
- * Each control handler has a list of these refs. The list_head is used to
- * keep a sorted-by-control-ID list of all controls, while the next pointer
- * is used to link the control in the hash's bucket.
- */
+/**
+ * struct v4l2_ctrl_ref - The control reference.
+ * @node: List node for the sorted list.
+ * @next: Single-link list node for the hash.
+ * @ctrl: The actual control information.
+ * @helper: Pointer to helper struct. Used internally in prepare_ext_ctrls().
+ *
+ * Each control handler has a list of these refs. The list_head is used to
+ * keep a sorted-by-control-ID list of all controls, while the next pointer
+ * is used to link the control in the hash's bucket.
+ */
struct v4l2_ctrl_ref {
struct list_head node;
struct v4l2_ctrl_ref *next;
@@ -227,25 +239,26 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_helper *helper;
};
-/** struct v4l2_ctrl_handler - The control handler keeps track of all the
- * controls: both the controls owned by the handler and those inherited
- * from other handlers.
- * @_lock: Default for "lock".
- * @lock: Lock to control access to this handler and its controls.
- * May be replaced by the user right after init.
- * @ctrls: The list of controls owned by this handler.
- * @ctrl_refs: The list of control references.
- * @cached: The last found control reference. It is common that the same
- * control is needed multiple times, so this is a simple
- * optimization.
- * @buckets: Buckets for the hashing. Allows for quick control lookup.
- * @notify: A notify callback that is called whenever the control changes value.
- * Note that the handler's lock is held when the notify function
- * is called!
- * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
- * @nr_of_buckets: Total number of buckets in the array.
- * @error: The error code of the first failed control addition.
- */
+/**
+ * struct v4l2_ctrl_handler - The control handler keeps track of all the
+ * controls: both the controls owned by the handler and those inherited
+ * from other handlers.
+ * @_lock: Default for "lock".
+ * @lock: Lock to control access to this handler and its controls.
+ * May be replaced by the user right after init.
+ * @ctrls: The list of controls owned by this handler.
+ * @ctrl_refs: The list of control references.
+ * @cached: The last found control reference. It is common that the same
+ * control is needed multiple times, so this is a simple
+ * optimization.
+ * @buckets: Buckets for the hashing. Allows for quick control lookup.
+ * @notify: A notify callback that is called whenever the control changes value.
+ * Note that the handler's lock is held when the notify function
+ * is called!
+ * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
+ * @nr_of_buckets: Total number of buckets in the array.
+ * @error: The error code of the first failed control addition.
+ */
struct v4l2_ctrl_handler {
struct mutex _lock;
struct mutex *lock;
@@ -259,32 +272,35 @@ struct v4l2_ctrl_handler {
int error;
};
-/** struct v4l2_ctrl_config - Control configuration structure.
- * @ops: The control ops.
- * @type_ops: The control type ops. Only needed for compound controls.
- * @id: The control ID.
- * @name: The control name.
- * @type: The control type.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value for non-menu controls.
- * @def: The control's default value.
- * @dims: The size of each dimension.
- * @elem_size: The size in bytes of the control.
- * @flags: The control's flags.
- * @menu_skip_mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @qmenu: A const char * array for all menu items. Array entries that are
- * empty strings ("") correspond to non-existing menu items (this
- * is in addition to the menu_skip_mask above). The last entry
- * must be NULL.
- * @is_private: If set, then this control is private to its handler and it
- * will not be added to any other handlers.
- */
+/**
+ * struct v4l2_ctrl_config - Control configuration structure.
+ * @ops: The control ops.
+ * @type_ops: The control type ops. Only needed for compound controls.
+ * @id: The control ID.
+ * @name: The control name.
+ * @type: The control type.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value for non-menu controls.
+ * @def: The control's default value.
+ * @dims: The size of each dimension.
+ * @elem_size: The size in bytes of the control.
+ * @flags: The control's flags.
+ * @menu_skip_mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @qmenu: A const char * array for all menu items. Array entries that are
+ * empty strings ("") correspond to non-existing menu items (this
+ * is in addition to the menu_skip_mask above). The last entry
+ * must be NULL.
+ * @qmenu_int: A const s64 integer array for all menu items of the type
+ * V4L2_CTRL_TYPE_INTEGER_MENU.
+ * @is_private: If set, then this control is private to its handler and it
+ * will not be added to any other handlers.
+ */
struct v4l2_ctrl_config {
const struct v4l2_ctrl_ops *ops;
const struct v4l2_ctrl_type_ops *type_ops;
@@ -304,42 +320,44 @@ struct v4l2_ctrl_config {
unsigned int is_private:1;
};
-/** v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
- *
- * This works for all standard V4L2 controls.
- * For non-standard controls it will only fill in the given arguments
- * and @name will be NULL.
- *
- * This function will overwrite the contents of @name, @type and @flags.
- * The contents of @min, @max, @step and @def may be modified depending on
- * the type.
- *
- * Do not use in drivers! It is used internally for backwards compatibility
- * control handling only. Once all drivers are converted to use the new
- * control framework this function will no longer be exported.
- */
+/*
+ * v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
+ *
+ * This works for all standard V4L2 controls.
+ * For non-standard controls it will only fill in the given arguments
+ * and @name will be NULL.
+ *
+ * This function will overwrite the contents of @name, @type and @flags.
+ * The contents of @min, @max, @step and @def may be modified depending on
+ * the type.
+ *
+ * Do not use in drivers! It is used internally for backwards compatibility
+ * control handling only. Once all drivers are converted to use the new
+ * control framework this function will no longer be exported.
+ */
void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags);
-/** v4l2_ctrl_handler_init_class() - Initialize the control handler.
- * @hdl: The control handler.
- * @nr_of_controls_hint: A hint of how many controls this handler is
- * expected to refer to. This is the total number, so including
- * any inherited controls. It doesn't have to be precise, but if
- * it is way off, then you either waste memory (too many buckets
- * are allocated) or the control lookup becomes slower (not enough
- * buckets are allocated, so there are more slow list lookups).
- * It will always work, though.
- * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
- * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
- *
- * Returns an error if the buckets could not be allocated. This error will
- * also be stored in @hdl->error.
- *
- * Never use this call directly, always use the v4l2_ctrl_handler_init
- * macro that hides the @key and @name arguments.
- */
+/**
+ * v4l2_ctrl_handler_init_class() - Initialize the control handler.
+ * @hdl: The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ * expected to refer to. This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
+ * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
+ *
+ * Returns an error if the buckets could not be allocated. This error will
+ * also be stored in @hdl->error.
+ *
+ * Never use this call directly, always use the v4l2_ctrl_handler_init
+ * macro that hides the @key and @name arguments.
+ */
int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint,
struct lock_class_key *key, const char *name);
@@ -361,289 +379,326 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL)
#endif
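
As a rough usage sketch of the init/free pair above (the names foo_state and foo_init_controls are hypothetical, not taken from this header):

    struct foo_state {
            struct v4l2_ctrl_handler hdl;
            /* ... */
    };

    static int foo_init_controls(struct foo_state *state)
    {
            /* hint: roughly how many controls will be added below */
            v4l2_ctrl_handler_init(&state->hdl, 4);
            /* ... v4l2_ctrl_new_std() calls go here ... */
            if (state->hdl.error) {
                    int err = state->hdl.error;

                    v4l2_ctrl_handler_free(&state->hdl);
                    return err;
            }
            return 0;
    }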
-/** v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
- * the control list.
- * @hdl: The control handler.
- *
- * Does nothing if @hdl == NULL.
- */
+/**
+ * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
+ * the control list.
+ * @hdl: The control handler.
+ *
+ * Does nothing if @hdl == NULL.
+ */
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
-/** v4l2_ctrl_lock() - Helper function to lock the handler
- * associated with the control.
- * @ctrl: The control to lock.
- */
+/**
+ * v4l2_ctrl_lock() - Helper function to lock the handler
+ * associated with the control.
+ * @ctrl: The control to lock.
+ */
static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
{
mutex_lock(ctrl->handler->lock);
}
-/** v4l2_ctrl_unlock() - Helper function to unlock the handler
- * associated with the control.
- * @ctrl: The control to unlock.
- */
+/**
+ * v4l2_ctrl_unlock() - Helper function to unlock the handler
+ * associated with the control.
+ * @ctrl: The control to unlock.
+ */
static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
{
mutex_unlock(ctrl->handler->lock);
}
-/** v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
- * to the handler to initialize the hardware to the current control values.
- * @hdl: The control handler.
- *
- * Button controls will be skipped, as are read-only controls.
- *
- * If @hdl == NULL, then this just returns 0.
- */
+/**
+ * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
+ * to the handler to initialize the hardware to the current control values.
+ * @hdl: The control handler.
+ *
+ * Button controls will be skipped, as are read-only controls.
+ *
+ * If @hdl == NULL, then this just returns 0.
+ */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl);
-/** v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
- * @hdl: The control handler.
- * @prefix: The prefix to use when logging the control values. If the
- * prefix does not end with a space, then ": " will be added
- * after the prefix. If @prefix == NULL, then no prefix will be
- * used.
- *
- * For use with VIDIOC_LOG_STATUS.
- *
- * Does nothing if @hdl == NULL.
- */
+/**
+ * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
+ * @hdl: The control handler.
+ * @prefix: The prefix to use when logging the control values. If the
+ * prefix does not end with a space, then ": " will be added
+ * after the prefix. If @prefix == NULL, then no prefix will be
+ * used.
+ *
+ * For use with VIDIOC_LOG_STATUS.
+ *
+ * Does nothing if @hdl == NULL.
+ */
void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
const char *prefix);
-/** v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
- * control.
- * @hdl: The control handler.
- * @cfg: The control's configuration data.
- * @priv: The control's driver-specific private data.
- *
- * If the &v4l2_ctrl struct could not be allocated then NULL is returned
- * and @hdl->error is set to the error code (if it wasn't set already).
- */
+/**
+ * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
+ * control.
+ * @hdl: The control handler.
+ * @cfg: The control's configuration data.
+ * @priv: The control's driver-specific private data.
+ *
+ * If the &v4l2_ctrl struct could not be allocated then NULL is returned
+ * and @hdl->error is set to the error code (if it wasn't set already).
+ */
struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_config *cfg, void *priv);
-/** v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value
- * @def: The control's default value.
- *
- * If the &v4l2_ctrl struct could not be allocated, or the control
- * ID is not known, then NULL is returned and @hdl->error is set to the
- * appropriate error code (if it wasn't set already).
- *
- * If @id refers to a menu control, then this function will return NULL.
- *
- * Use v4l2_ctrl_new_std_menu() when adding menu controls.
- */
+/**
+ * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * If the &v4l2_ctrl struct could not be allocated, or the control
+ * ID is not known, then NULL is returned and @hdl->error is set to the
+ * appropriate error code (if it wasn't set already).
+ *
+ * If @id refers to a menu control, then this function will return NULL.
+ *
+ * Use v4l2_ctrl_new_std_menu() when adding menu controls.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s64 min, s64 max, u64 step, s64 def);
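
For illustration, adding a couple of standard controls with this helper might look like the following sketch (hdl and foo_ctrl_ops stand for the driver's handler and ops):

    /* integer control: brightness 0..255, step 1, default 128 */
    v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
    /* boolean control: horizontal flip, default off */
    v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);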
-/** v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @def: The control's default value.
- *
- * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
- * determines which menu items are to be skipped.
- *
- * If @id refers to a non-menu control, then this function will return NULL.
- */
+/**
+ * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
+ * determines which menu items are to be skipped.
+ *
+ * If @id refers to a non-menu control, then this function will return NULL.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 max, u64 mask, u8 def);
-/** v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
- * with driver specific menu.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @mask: The control's skip mask for menu controls. This makes it
- * easy to skip menu items that are not valid. If bit X is set,
- * then menu item X is skipped. Of course, this only works for
- * menus with <= 64 menu items. There are no menus that come
- * close to that number, so this is OK. Should we ever need more,
- * then this will have to be extended to a bit array.
- * @def: The control's default value.
- * @qmenu: The new menu.
- *
- * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
- * menu of this control.
- *
- */
+/**
+ * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
+ * with driver specific menu.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @mask: The control's skip mask for menu controls. This makes it
+ * easy to skip menu items that are not valid. If bit X is set,
+ * then menu item X is skipped. Of course, this only works for
+ * menus with <= 64 menu items. There are no menus that come
+ * close to that number, so this is OK. Should we ever need more,
+ * then this will have to be extended to a bit array.
+ * @def: The control's default value.
+ * @qmenu: The new menu.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
+ * menu of this control.
+ *
+ */
struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id, u8 max,
u64 mask, u8 def, const char * const *qmenu);
-/** v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
- * @hdl: The control handler.
- * @ops: The control ops.
- * @id: The control ID.
- * @max: The control's maximum value.
- * @def: The control's default value.
- * @qmenu_int: The control's menu entries.
- *
- * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly
- * takes as an argument an array of integers determining the menu items.
- *
- * If @id refers to a non-integer-menu control, then this function will return NULL.
- */
+/**
+ * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @def: The control's default value.
+ * @qmenu_int: The control's menu entries.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
+ * takes as an argument an array of integers determining the menu items.
+ *
+ * If @id refers to a non-integer-menu control, then this function will return NULL.
+ */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, u8 max, u8 def, const s64 *qmenu_int);
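
A hedged sketch of creating an integer menu control; V4L2_CID_LINK_FREQ is a standard integer-menu ID, while the frequency table and the foo_* names are made up:

    static const s64 foo_link_freqs[] = { 148500000, 297000000 };

    v4l2_ctrl_new_int_menu(hdl, &foo_ctrl_ops, V4L2_CID_LINK_FREQ,
                           ARRAY_SIZE(foo_link_freqs) - 1, 0, foo_link_freqs);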
-/** v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
- * @hdl: The control handler.
- * @ctrl: The control to add.
- *
- * It will return NULL if it was unable to add the control reference.
- * If the control already belonged to the handler, then it will do
- * nothing and just return @ctrl.
- */
+/**
+ * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
+ * @hdl: The control handler.
+ * @ctrl: The control to add.
+ *
+ * It will return NULL if it was unable to add the control reference.
+ * If the control already belonged to the handler, then it will do
+ * nothing and just return @ctrl.
+ */
struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl);
-/** v4l2_ctrl_add_handler() - Add all controls from handler @add to
- * handler @hdl.
- * @hdl: The control handler.
- * @add: The control handler whose controls you want to add to
- * the @hdl control handler.
- * @filter: This function will filter which controls should be added.
- *
- * Does nothing if either of the two handlers is a NULL pointer.
- * If @filter is NULL, then all controls are added. Otherwise only those
- * controls for which @filter returns true will be added.
- * In case of an error @hdl->error will be set to the error code (if it
- * wasn't set already).
- */
+/**
+ * v4l2_ctrl_add_handler() - Add all controls from handler @add to
+ * handler @hdl.
+ * @hdl: The control handler.
+ * @add: The control handler whose controls you want to add to
+ * the @hdl control handler.
+ * @filter: This function will filter which controls should be added.
+ *
+ * Does nothing if either of the two handlers is a NULL pointer.
+ * If @filter is NULL, then all controls are added. Otherwise only those
+ * controls for which @filter returns true will be added.
+ * In case of an error @hdl->error will be set to the error code (if it
+ * wasn't set already).
+ */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
bool (*filter)(const struct v4l2_ctrl *ctrl));
-/** v4l2_ctrl_radio_filter() - Standard filter for radio controls.
- * @ctrl: The control that is filtered.
- *
- * This will return true for any controls that are valid for radio device
- * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
- * transmitter class controls.
- *
- * This function is to be used with v4l2_ctrl_add_handler().
- */
+/**
+ * v4l2_ctrl_radio_filter() - Standard filter for radio controls.
+ * @ctrl: The control that is filtered.
+ *
+ * This will return true for any controls that are valid for radio device
+ * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
+ * transmitter class controls.
+ *
+ * This function is to be used with v4l2_ctrl_add_handler().
+ */
bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl);
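
For example, a driver that shares one main handler could expose only the radio-relevant controls on its radio node (radio_hdl and main_hdl are hypothetical fields):

    v4l2_ctrl_add_handler(&state->radio_hdl, &state->main_hdl,
                          v4l2_ctrl_radio_filter);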
-/** v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
- * @ncontrols: The number of controls in this cluster.
- * @controls: The cluster control array of size @ncontrols.
- */
+/**
+ * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
+ * @ncontrols: The number of controls in this cluster.
+ * @controls: The cluster control array of size @ncontrols.
+ */
void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls);
-/** v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
- * that cluster and set it up for autofoo/foo-type handling.
- * @ncontrols: The number of controls in this cluster.
- * @controls: The cluster control array of size @ncontrols. The first control
- * must be the 'auto' control (e.g. autogain, autoexposure, etc.)
- * @manual_val: The value for the first control in the cluster that equals the
- * manual setting.
- * @set_volatile: If true, then all controls except the first auto control will
- * be volatile.
- *
- * Use for control groups where one control selects some automatic feature and
- * the other controls are only active whenever the automatic feature is turned
- * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
- * red and blue balance, etc.
- *
- * The behavior of such controls is as follows:
- *
- * When the autofoo control is set to automatic, then any manual controls
- * are set to inactive and any reads will call g_volatile_ctrl (if the control
- * was marked volatile).
- *
- * When the autofoo control is set to manual, then any manual controls will
- * be marked active, and any reads will just return the current value without
- * going through g_volatile_ctrl.
- *
- * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
- * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
- * if autofoo is in auto mode.
- */
+/**
+ * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
+ * that cluster and set it up for autofoo/foo-type handling.
+ * @ncontrols: The number of controls in this cluster.
+ * @controls: The cluster control array of size @ncontrols. The first control
+ * must be the 'auto' control (e.g. autogain, autoexposure, etc.)
+ * @manual_val: The value for the first control in the cluster that equals the
+ * manual setting.
+ * @set_volatile: If true, then all controls except the first auto control will
+ * be volatile.
+ *
+ * Use for control groups where one control selects some automatic feature and
+ * the other controls are only active whenever the automatic feature is turned
+ * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
+ * red and blue balance, etc.
+ *
+ * The behavior of such controls is as follows:
+ *
+ * When the autofoo control is set to automatic, then any manual controls
+ * are set to inactive and any reads will call g_volatile_ctrl (if the control
+ * was marked volatile).
+ *
+ * When the autofoo control is set to manual, then any manual controls will
+ * be marked active, and any reads will just return the current value without
+ * going through g_volatile_ctrl.
+ *
+ * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
+ * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
+ * if autofoo is in auto mode.
+ */
void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
u8 manual_val, bool set_volatile);
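
A sketch of the autogain/gain case described above (the control pointers and ranges are hypothetical driver state):

    struct v4l2_ctrl *cluster[2];

    cluster[0] = v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_AUTOGAIN,
                                   0, 1, 1, 1);
    cluster[1] = v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_GAIN,
                                   0, 255, 1, 32);
    /* manual mode is autogain == 0; gain is volatile while in auto mode */
    v4l2_ctrl_auto_cluster(2, cluster, 0, true);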
-/** v4l2_ctrl_find() - Find a control with the given ID.
- * @hdl: The control handler.
- * @id: The control ID to find.
- *
- * If @hdl == NULL this will return NULL as well. Will lock the handler so
- * do not use from inside &v4l2_ctrl_ops.
- */
+/**
+ * v4l2_ctrl_find() - Find a control with the given ID.
+ * @hdl: The control handler.
+ * @id: The control ID to find.
+ *
+ * If @hdl == NULL this will return NULL as well. Will lock the handler so
+ * do not use from inside &v4l2_ctrl_ops.
+ */
struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
-/** v4l2_ctrl_activate() - Make the control active or inactive.
- * @ctrl: The control to (de)activate.
- * @active: True if the control should become active.
- *
- * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
- * Does nothing if @ctrl == NULL.
- * This will usually be called from within the s_ctrl op.
- * The V4L2_EVENT_CTRL event will be generated afterwards.
- *
- * This function assumes that the control handler is locked.
- */
+/**
+ * v4l2_ctrl_activate() - Make the control active or inactive.
+ * @ctrl: The control to (de)activate.
+ * @active: True if the control should become active.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * This will usually be called from within the s_ctrl op.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ *
+ * This function assumes that the control handler is locked.
+ */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
-/** v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
- * @ctrl: The control to (de)activate.
- * @grabbed: True if the control should become grabbed.
- *
- * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
- * Does nothing if @ctrl == NULL.
- * The V4L2_EVENT_CTRL event will be generated afterwards.
- * This will usually be called when starting or stopping streaming in the
- * driver.
- *
- * This function assumes that the control handler is not locked and will
- * take the lock itself.
- */
+/**
+ * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
+ * @ctrl: The control to (de)activate.
+ * @grabbed: True if the control should become grabbed.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ * This will usually be called when starting or stopping streaming in the
+ * driver.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
-/** __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() */
+/**
+ * __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
+ *
+ * @ctrl: The control to update.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control handler is already locked; the
+ * caller is responsible for acquiring the lock.
+ */
int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def);
-/** v4l2_ctrl_modify_range() - Update the range of a control.
- * @ctrl: The control to update.
- * @min: The control's minimum value.
- * @max: The control's maximum value.
- * @step: The control's step value
- * @def: The control's default value.
- *
- * Update the range of a control on the fly. This works for control types
- * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
- * @step value is interpreted as a menu_skip_mask.
- *
- * An error is returned if one of the range arguments is invalid for this
- * control type.
- *
- * This function assumes that the control handler is not locked and will
- * take the lock itself.
- */
+/**
+ * v4l2_ctrl_modify_range() - Update the range of a control.
+ * @ctrl: The control to update.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def)
{
@@ -656,21 +711,23 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
return rval;
}
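
For instance, a sensor driver could narrow an exposure control after switching to a shorter frame; state->exposure and max_exp below are hypothetical:

    /* new maximum in lines; keep step 1 and reuse the maximum as default */
    v4l2_ctrl_modify_range(state->exposure, 1, max_exp, 1, max_exp);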
-/** v4l2_ctrl_notify() - Function to set a notify callback for a control.
- * @ctrl: The control.
- * @notify: The callback function.
- * @priv: The callback private handle, passed as argument to the callback.
- *
- * This function sets a callback function for the control. If @ctrl is NULL,
- * then it will do nothing. If @notify is NULL, then the notify callback will
- * be removed.
- *
- * There can be only one notify. If another already exists, then a WARN_ON
- * will be issued and the function will do nothing.
- */
+/**
+ * v4l2_ctrl_notify() - Function to set a notify callback for a control.
+ * @ctrl: The control.
+ * @notify: The callback function.
+ * @priv: The callback private handle, passed as argument to the callback.
+ *
+ * This function sets a callback function for the control. If @ctrl is NULL,
+ * then it will do nothing. If @notify is NULL, then the notify callback will
+ * be removed.
+ *
+ * There can be only one notify. If another already exists, then a WARN_ON
+ * will be issued and the function will do nothing.
+ */
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
-/** v4l2_ctrl_get_name() - Get the name of the control
+/**
+ * v4l2_ctrl_get_name() - Get the name of the control
* @id: The control ID.
*
* This function returns the name of the given control ID or NULL if it isn't
@@ -678,7 +735,8 @@ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void
*/
const char *v4l2_ctrl_get_name(u32 id);
-/** v4l2_ctrl_get_menu() - Get the menu string array of the control
+/**
+ * v4l2_ctrl_get_menu() - Get the menu string array of the control
* @id: The control ID.
*
* This function returns the NULL-terminated menu string array name of the
@@ -686,7 +744,8 @@ const char *v4l2_ctrl_get_name(u32 id);
*/
const char * const *v4l2_ctrl_get_menu(u32 id);
-/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+/**
+ * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
* @id: The control ID.
* @len: The size of the integer array.
*
@@ -695,29 +754,41 @@ const char * const *v4l2_ctrl_get_menu(u32 id);
*/
const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
-/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
- * @ctrl: The control.
- *
- * This returns the control's value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for integer type controls only.
- */
+/**
+ * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
+ * @ctrl: The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
-/** __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). */
+/**
+ * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
+
/** v4l2_ctrl_s_ctrl() - Helper function to set the control's value from within a driver.
- * @ctrl: The control.
- * @val: The new value.
- *
- * This set the control's new value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for integer type controls only.
- */
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for integer type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
int rval;
@@ -729,30 +800,45 @@ static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
return rval;
}
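
The locked and unlocked setters pair up as in this sketch (state->gain is a hypothetical control pointer):

    /* ordinary driver code: the helper takes the handler lock itself */
    v4l2_ctrl_s_ctrl(state->gain, 64);

    /* code that already holds the handler lock, e.g. inside an op */
    __v4l2_ctrl_s_ctrl(state->gain, 64);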
-/** v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value from within a driver.
- * @ctrl: The control.
- *
- * This returns the control's value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for 64-bit integer type controls only.
- */
+/**
+ * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value
+ * from within a driver.
+ * @ctrl: The control.
+ *
+ * This returns the control's value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl);
-/** __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). */
+/**
+ * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64().
+ *
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val);
-/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value from within a driver.
- * @ctrl: The control.
- * @val: The new value.
- *
- * This set the control's new value safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for 64-bit integer type controls only.
- */
+/**
+ * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @val: The new value.
+ *
+ * This sets the control's new value safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for 64-bit integer type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
{
int rval;
@@ -764,19 +850,31 @@ static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
return rval;
}
-/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). */
+/**
+ * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string().
+ *
+ * @ctrl: The control.
+ * @s: The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s);
-/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value from within a driver.
- * @ctrl: The control.
- * @s: The new string.
- *
- * This set the control's new string safely by going through the control
- * framework. This function will lock the control's handler, so it cannot be
- * used from within the &v4l2_ctrl_ops functions.
- *
- * This function is for string type controls only.
- */
+/**
+ * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @s: The new string.
+ *
+ * This sets the control's new string safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for string type controls only.
+ */
static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
{
int rval;
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index eecd3102a..b6130b50a 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -23,11 +23,14 @@
#include <linux/videodev2.h>
-/** v4l2_dv_timings_presets: list of all dv_timings presets.
+/**
+ * v4l2_dv_timings_presets: list of all dv_timings presets.
*/
extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
-/** v4l2_check_dv_timings_fnc - timings check callback
+/**
+ * v4l2_check_dv_timings_fnc - timings check callback
+ *
* @t: the v4l2_dv_timings struct.
* @handle: a handle from the driver.
*
@@ -35,86 +38,101 @@ extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
*/
typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
-/** v4l2_valid_dv_timings() - are these timings valid?
- * @t: the v4l2_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @fnc: callback to check if this timing is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * Returns true if the given dv_timings struct is supported by the
- * hardware capabilities and the callback function (if non-NULL), returns
- * false otherwise.
- */
+/**
+ * v4l2_valid_dv_timings() - are these timings valid?
+ *
+ * @t: the v4l2_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * Returns true if the given dv_timings struct is supported by the
+ * hardware capabilities and the callback function (if non-NULL), returns
+ * false otherwise.
+ */
bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
-/** v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV timings based on capabilities
- * @t: the v4l2_enum_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @fnc: callback to check if this timing is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * This enumerates dv_timings using the full list of possible CEA-861 and DMT
- * timings, filtering out any timings that are not supported based on the
- * hardware capabilities and the callback function (if non-NULL).
- *
- * If a valid timing for the given index is found, it will fill in @t and
- * return 0, otherwise it returns -EINVAL.
- */
+/**
+ * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV
+ * timings based on capabilities
+ *
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This enumerates dv_timings using the full list of possible CEA-861 and DMT
+ * timings, filtering out any timings that are not supported based on the
+ * hardware capabilities and the callback function (if non-NULL).
+ *
+ * If a valid timing for the given index is found, it will fill in @t and
+ * return 0, otherwise it returns -EINVAL.
+ */
int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
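
A driver's VIDIOC_ENUM_DV_TIMINGS handler usually just forwards to this helper; foo_timings_cap stands for the driver's filled-in struct v4l2_dv_timings_cap:

    static int foo_enum_dv_timings(struct file *file, void *fh,
                                   struct v4l2_enum_dv_timings *timings)
    {
            return v4l2_enum_dv_timings_cap(timings, &foo_timings_cap,
                                            NULL, NULL);
    }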
-/** v4l2_find_dv_timings_cap() - Find the closest timings struct
- * @t: the v4l2_enum_dv_timings struct.
- * @cap: the v4l2_dv_timings_cap capabilities.
- * @pclock_delta: maximum delta between t->pixelclock and the timing struct
- * under consideration.
- * @fnc: callback to check if a given timings struct is OK. May be NULL.
- * @fnc_handle: a handle that is passed on to @fnc.
- *
- * This function tries to map the given timings to an entry in the
- * full list of possible CEA-861 and DMT timings, filtering out any timings
- * that are not supported based on the hardware capabilities and the callback
- * function (if non-NULL).
- *
- * On success it will fill in @t with the found timings and it returns true.
- * On failure it will return false.
- */
+/**
+ * v4l2_find_dv_timings_cap() - Find the closest timings struct
+ *
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @pclock_delta: maximum delta between t->pixelclock and the timing struct
+ * under consideration.
+ * @fnc: callback to check if a given timings struct is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This function tries to map the given timings to an entry in the
+ * full list of possible CEA-861 and DMT timings, filtering out any timings
+ * that are not supported based on the hardware capabilities and the callback
+ * function (if non-NULL).
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
const struct v4l2_dv_timings_cap *cap,
unsigned pclock_delta,
v4l2_check_dv_timings_fnc fnc,
void *fnc_handle);
-/** v4l2_match_dv_timings() - do two timings match?
- * @measured: the measured timings data.
- * @standard: the timings according to the standard.
- * @pclock_delta: maximum delta in Hz between standard->pixelclock and
- * the measured timings.
- *
- * Returns true if the two timings match, returns false otherwise.
- */
+/**
+ * v4l2_match_dv_timings() - do two timings match?
+ *
+ * @measured: the measured timings data.
+ * @standard: the timings according to the standard.
+ * @pclock_delta: maximum delta in Hz between standard->pixelclock and
+ * the measured timings.
+ *
+ * Returns true if the two timings match, returns false otherwise.
+ */
bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured,
const struct v4l2_dv_timings *standard,
unsigned pclock_delta);
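
For example, a receiver that has measured the incoming signal can compare it against a known preset (measured and sd are hypothetical; 250 kHz pixel-clock tolerance):

    static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60;

    if (v4l2_match_dv_timings(&measured, &cea1080p60, 250000))
            v4l2_print_dv_timings(sd->name, "detected: ", &measured, false);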
-/** v4l2_print_dv_timings() - log the contents of a dv_timings struct
- * @dev_prefix:device prefix for each log line.
- * @prefix: additional prefix for each log line, may be NULL.
- * @t: the timings data.
- * @detailed: if true, give a detailed log.
- */
+/**
+ * v4l2_print_dv_timings() - log the contents of a dv_timings struct
+ * @dev_prefix: device prefix for each log line.
+ * @prefix: additional prefix for each log line, may be NULL.
+ * @t: the timings data.
+ * @detailed: if true, give a detailed log.
+ */
void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
const struct v4l2_dv_timings *t, bool detailed);
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+/**
+ * v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ *
* @frame_height - the total height of the frame (including blanking) in lines.
* @hfreq - the horizontal frequency in Hz.
* @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of image (does not include blanking). This
+ * information is needed only in case of version 2 of reduced blanking.
+ * In other cases, this parameter does not have any effect on timings.
* @polarities - the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced - if this flag is true, it indicates interlaced format
@@ -125,9 +143,12 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* in with the found CVT timings.
*/
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt);
+ unsigned active_width, u32 polarities, bool interlaced,
+ struct v4l2_dv_timings *fmt);
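
With the added parameter, a caller measuring a signal might invoke it as below (all measured values are hypothetical; active_width only matters for CVT reduced blanking version 2):

    struct v4l2_dv_timings timings;

    if (v4l2_detect_cvt(frame_height, hfreq, vsync_lines, active_width,
                        V4L2_DV_HSYNC_POS_POL, false, &timings))
            ; /* timings now holds the detected CVT mode */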
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+/**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ *
* @frame_height - the total height of the frame (including blanking) in lines.
* @hfreq - the horizontal frequency in Hz.
* @vsync - the height of the vertical sync in lines.
@@ -149,8 +170,10 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
u32 polarities, bool interlaced, struct v4l2_fract aspect,
struct v4l2_dv_timings *fmt);
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+/**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
* 0x15 and 0x16 from the EDID.
+ *
* @hor_landscape - byte 0x15 from the EDID.
* @vert_portrait - byte 0x16 from the EDID.
*
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 1ab9045e5..9792f9064 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -68,10 +68,11 @@ struct v4l2_subdev;
struct v4l2_subscribed_event;
struct video_device;
-/** struct v4l2_kevent - Internal kernel event struct.
- * @list: List node for the v4l2_fh->available list.
- * @sev: Pointer to parent v4l2_subscribed_event.
- * @event: The event itself.
+/**
+ * struct v4l2_kevent - Internal kernel event struct.
+ * @list: List node for the v4l2_fh->available list.
+ * @sev: Pointer to parent v4l2_subscribed_event.
+ * @event: The event itself.
*/
struct v4l2_kevent {
struct list_head list;
@@ -80,11 +81,12 @@ struct v4l2_kevent {
};
/** struct v4l2_subscribed_event_ops - Subscribed event operations.
- * @add: Optional callback, called when a new listener is added
- * @del: Optional callback, called when a listener stops listening
- * @replace: Optional callback that can replace event 'old' with event 'new'.
- * @merge: Optional callback that can merge event 'old' into event 'new'.
- */
+ *
+ * @add: Optional callback, called when a new listener is added
+ * @del: Optional callback, called when a listener stops listening
+ * @replace: Optional callback that can replace event 'old' with event 'new'.
+ * @merge: Optional callback that can merge event 'old' into event 'new'.
+ */
struct v4l2_subscribed_event_ops {
int (*add)(struct v4l2_subscribed_event *sev, unsigned elems);
void (*del)(struct v4l2_subscribed_event *sev);
@@ -92,19 +94,20 @@ struct v4l2_subscribed_event_ops {
void (*merge)(const struct v4l2_event *old, struct v4l2_event *new);
};
-/** struct v4l2_subscribed_event - Internal struct representing a subscribed event.
- * @list: List node for the v4l2_fh->subscribed list.
- * @type: Event type.
- * @id: Associated object ID (e.g. control ID). 0 if there isn't any.
- * @flags: Copy of v4l2_event_subscription->flags.
- * @fh: Filehandle that subscribed to this event.
- * @node: List node that hooks into the object's event list (if there is one).
- * @ops: v4l2_subscribed_event_ops
- * @elems: The number of elements in the events array.
- * @first: The index of the events containing the oldest available event.
- * @in_use: The number of queued events.
- * @events: An array of @elems events.
- */
+/**
+ * struct v4l2_subscribed_event - Internal struct representing a subscribed event.
+ * @list: List node for the v4l2_fh->subscribed list.
+ * @type: Event type.
+ * @id: Associated object ID (e.g. control ID). 0 if there isn't any.
+ * @flags: Copy of v4l2_event_subscription->flags.
+ * @fh: Filehandle that subscribed to this event.
+ * @node: List node that hooks into the object's event list (if there is one).
+ * @ops: v4l2_subscribed_event_ops
+ * @elems: The number of elements in the events array.
+ * @first: The index of the events containing the oldest available event.
+ * @in_use: The number of queued events.
+ * @events: An array of @elems events.
+ */
struct v4l2_subscribed_event {
struct list_head list;
u32 type;
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 098236c08..3d184ab52 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -48,13 +48,13 @@ struct v4l2_flash_ops {
/**
* struct v4l2_flash_config - V4L2 Flash sub-device initialization data
* @dev_name: the name of the media entity,
- unique in the system
+ * unique in the system
* @torch_intensity: constraints for the LED in torch mode
* @indicator_intensity: constraints for the indicator LED
* @flash_faults: bitmask of flash faults that the LED flash class
- device can report; corresponding LED_FAULT* bit
- definitions are available in the header file
- <linux/led-class-flash.h>
+ * device can report; corresponding LED_FAULT* bit
+ * definitions are available in the header file
+ * <linux/led-class-flash.h>
* @has_external_strobe: external strobe capability
*/
struct v4l2_flash_config {
@@ -105,7 +105,7 @@ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
* @fled_cdev: LED flash class device to wrap
* @iled_cdev: LED flash class device representing indicator LED associated
* with fled_cdev, may be NULL
- * @flash_ops: V4L2 Flash device ops
+ * @ops: V4L2 Flash device ops
* @config: initialization data for V4L2 Flash sub-device
*
* Create V4L2 Flash sub-device wrapping given LED subsystem device.
@@ -123,7 +123,7 @@ struct v4l2_flash *v4l2_flash_init(
/**
* v4l2_flash_release - release V4L2 Flash sub-device
- * @flash: the V4L2 Flash sub-device to release
+ * @v4l2_flash: the V4L2 Flash sub-device to release
*
* Release V4L2 Flash sub-device.
*/
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 73069e4c2..34cc99e09 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -65,7 +65,7 @@
V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3)
/**
- * v4l2_mbus_type - media bus type
+ * enum v4l2_mbus_type - media bus type
* @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync
* @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can
* also be used for BT.1120
@@ -78,7 +78,7 @@ enum v4l2_mbus_type {
};
/**
- * v4l2_mbus_config - media bus configuration
+ * struct v4l2_mbus_config - media bus configuration
* @type: in: interface type
* @flags: in / out: configuration flags, depending on @type
*/
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3bbd96da2..8849aaba6 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -40,6 +40,10 @@
* v4l2_m2m_job_finish() (as if the transaction ended normally).
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
+ * @lock: optional. Define a driver's own lock callback, instead of using
+ * m2m_ctx->q_lock.
+ * @unlock: optional. Define a driver's own unlock callback, instead of
+ * using m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
@@ -161,6 +165,8 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
/**
* v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
* use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -171,6 +177,8 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_num_src_bufs_ready() - return the number of destination buffers
* ready for use
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -183,6 +191,8 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
* buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -192,6 +202,8 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
* ready buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -200,6 +212,8 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -209,6 +223,8 @@ struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -221,6 +237,8 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
* buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -230,6 +248,8 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
* ready buffers and return it
+ *
+ * @m2m_ctx: pointer to struct v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 4e18318eb..b273cf9ac 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -44,6 +44,7 @@
struct v4l2_device;
struct v4l2_ctrl_handler;
+struct v4l2_event;
struct v4l2_event_subscription;
struct v4l2_fh;
struct v4l2_subdev;
@@ -117,34 +118,67 @@ struct v4l2_subdev_io_pin_config {
u8 strength; /* Pin drive strength */
};
-/*
- s_io_pin_config: configure one or more chip I/O pins for chips that
- multiplex different internal signal pads out to IO pins. This function
- takes a pointer to an array of 'n' pin configuration entries, one for
- each pin being configured. This function could be called at times
- other than just subdevice initialization.
-
- init: initialize the sensor registers to some sort of reasonable default
- values. Do not use for new drivers and should be removed in existing
- drivers.
-
- load_fw: load firmware.
-
- reset: generic reset command. The argument selects which subsystems to
- reset. Passing 0 will always reset the whole chip. Do not use for new
- drivers without discussing this first on the linux-media mailinglist.
- There should be no reason normally to reset a device.
-
- s_gpio: set GPIO pins. Very simple right now, might need to be extended with
- a direction argument if needed.
-
- s_power: puts subdevice in power saving mode (on == 0) or normal operation
- mode (on == 1).
-
- interrupt_service_routine: Called by the bridge chip's interrupt service
- handler, when an interrupt status has be raised due to this subdev,
- so that this subdev can handle the details. It may schedule work to be
- performed later. It must not sleep. *Called from an IRQ context*.
+/**
+ * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
+ *
+ * @log_status: callback for VIDIOC_LOG_STATUS ioctl handler code.
+ *
+ * @s_io_pin_config: configure one or more chip I/O pins for chips that
+ * multiplex different internal signal pads out to IO pins. This function
+ * takes a pointer to an array of 'n' pin configuration entries, one for
+ * each pin being configured. This function could be called at times
+ * other than just subdevice initialization.
+ *
+ * @init: initialize the sensor registers to some sort of reasonable default
+ * values. Do not use for new drivers and should be removed in existing
+ * drivers.
+ *
+ * @load_fw: load firmware.
+ *
+ * @reset: generic reset command. The argument selects which subsystems to
+ * reset. Passing 0 will always reset the whole chip. Do not use for new
+ * drivers without discussing this first on the linux-media mailinglist.
+ * There should be no reason normally to reset a device.
+ *
+ * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
+ * a direction argument if needed.
+ *
+ * @queryctrl: callback for VIDIOC_QUERYCTRL ioctl handler code.
+ *
+ * @g_ctrl: callback for VIDIOC_G_CTRL ioctl handler code.
+ *
+ * @s_ctrl: callback for VIDIOC_S_CTRL ioctl handler code.
+ *
+ * @g_ext_ctrls: callback for VIDIOC_G_EXT_CTRLS ioctl handler code.
+ *
+ * @s_ext_ctrls: callback for VIDIOC_S_EXT_CTRLS ioctl handler code.
+ *
+ * @try_ext_ctrls: callback for VIDIOC_TRY_EXT_CTRLS ioctl handler code.
+ *
+ * @querymenu: callback for VIDIOC_QUERYMENU ioctl handler code.
+ *
+ * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
+ * used to provide support for private ioctls used on the driver.
+ *
+ * @compat_ioctl32: called when a 32-bit application uses a 64-bit kernel,
+ * in order to fix data passed from/to userspace.
+ *
+ * @g_register: callback for VIDIOC_DBG_G_REGISTER ioctl handler code.
+ *
+ * @s_register: callback for VIDIOC_DBG_S_REGISTER ioctl handler code.
+ *
+ * @s_power: puts subdevice in power saving mode (on == 0) or normal operation
+ * mode (on == 1).
+ *
+ * @interrupt_service_routine: Called by the bridge chip's interrupt service
+ * handler, when an interrupt status has been raised due to this subdev,
+ * so that this subdev can handle the details. It may schedule work to be
+ * performed later. It must not sleep. *Called from an IRQ context*.
+ *
+ * @subscribe_event: used by drivers to request that the control framework
+ * notify them when the value of a control changes.
+ *
+ * @unsubscribe_event: remove event subscription from the control framework.
*/
struct v4l2_subdev_core_ops {
int (*log_status)(struct v4l2_subdev *sd);
@@ -179,18 +213,32 @@ struct v4l2_subdev_core_ops {
struct v4l2_event_subscription *sub);
};
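
A minimal core-ops table, as a sketch (foo_log_status and foo_s_power are hypothetical functions matching the prototypes above):

    static const struct v4l2_subdev_core_ops foo_core_ops = {
            .log_status = foo_log_status,
            .s_power = foo_s_power,
    };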
-/* s_radio: v4l device was opened in radio mode.
-
- g_frequency: freq->type must be filled in. Normally done by video_ioctl2
- or the bridge driver.
-
- g_tuner:
- s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the
- bridge driver.
-
- s_type_addr: sets tuner type and its I2C addr.
-
- s_config: sets tda9887 specific stuff, like port1, port2 and qss
+/**
+ * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened in radio mode.
+ *
+ * @s_radio: callback for VIDIOC_S_RADIO ioctl handler code.
+ *
+ * @s_frequency: callback for VIDIOC_S_FREQUENCY ioctl handler code.
+ *
+ * @g_frequency: callback for VIDIOC_G_FREQUENCY ioctl handler code.
+ * freq->type must be filled in. Normally done by video_ioctl2
+ * or the bridge driver.
+ *
+ * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS ioctl handler code.
+ *
+ * @g_tuner: callback for VIDIOC_G_TUNER ioctl handler code.
+ *
+ * @s_tuner: callback for VIDIOC_S_TUNER ioctl handler code. vt->type must be
+ * filled in. Normally done by video_ioctl2 or the
+ * bridge driver.
+ *
+ * @g_modulator: callback for VIDIOC_G_MODULATOR ioctl handler code.
+ *
+ * @s_modulator: callback for VIDIOC_S_MODULATOR ioctl handler code.
+ *
+ * @s_type_addr: sets tuner type and its I2C addr.
+ *
+ * @s_config: sets tda9887 specific stuff, like port1, port2 and qss
*/
struct v4l2_subdev_tuner_ops {
int (*s_radio)(struct v4l2_subdev *sd);
@@ -205,25 +253,31 @@ struct v4l2_subdev_tuner_ops {
int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
};
-/* s_clock_freq: set the frequency (in Hz) of the audio clock output.
- Used to slave an audio processor to the video decoder, ensuring that
- audio and video remain synchronized. Usual values for the frequency
- are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
- -EINVAL is returned.
-
- s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
- way to select I2S clock used by driving digital audio streams at some
- board designs. Usual values for the frequency are 1024000 and 2048000.
- If the frequency is not supported, then -EINVAL is returned.
-
- s_routing: used to define the input and/or output pins of an audio chip,
- and any additional configuration data.
- Never attempt to use user-level input IDs (e.g. Composite, S-Video,
- Tuner) at this level. An i2c device shouldn't know about whether an
- input pin is connected to a Composite connector, become on another
- board or platform it might be connected to something else entirely.
- The calling driver is responsible for mapping a user-level input to
- the right pins on the i2c device.
+/**
+ * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings
+ *
+ * @s_clock_freq: set the frequency (in Hz) of the audio clock output.
+ * Used to slave an audio processor to the video decoder, ensuring that
+ * audio and video remain synchronized. Usual values for the frequency
+ * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
+ * -EINVAL is returned.
+ *
+ * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
+ * way to select I2S clock used by driving digital audio streams at some
+ * board designs. Usual values for the frequency are 1024000 and 2048000.
+ * If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @s_routing: used to define the input and/or output pins of an audio chip,
+ * and any additional configuration data.
+ * Never attempt to use user-level input IDs (e.g. Composite, S-Video,
+ * Tuner) at this level. An i2c device shouldn't know about whether an
+ * input pin is connected to a Composite connector, because on another
+ * board or platform it might be connected to something else entirely.
+ * The calling driver is responsible for mapping a user-level input to
+ * the right pins on the i2c device.
+ *
+ * @s_stream: used to notify the audio code that the stream will start or has
+ * stopped.
*/
struct v4l2_subdev_audio_ops {
int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
@@ -242,6 +296,7 @@ struct v4l2_subdev_audio_ops {
/**
* struct v4l2_mbus_frame_desc_entry - media bus frame description structure
+ *
* @flags: V4L2_MBUS_FRAME_DESC_FL_* flags
* @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set
* @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB
@@ -265,45 +320,73 @@ struct v4l2_mbus_frame_desc {
unsigned short num_entries;
};
-/*
- s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
- video input devices.
-
- g_std_output: get current standard for video OUTPUT devices. This is ignored
- by video input devices.
-
- g_tvnorms: get v4l2_std_id with all standards supported by the video
- CAPTURE device. This is ignored by video output devices.
-
- g_tvnorms_output: get v4l2_std_id with all standards supported by the video
- OUTPUT device. This is ignored by video capture devices.
-
- s_crystal_freq: sets the frequency of the crystal used to generate the
- clocks in Hz. An extra flags field allows device specific configuration
- regarding clock frequency dividers, etc. If not used, then set flags
- to 0. If the frequency is not supported, then -EINVAL is returned.
-
- g_input_status: get input status. Same as the status field in the v4l2_input
- struct.
-
- s_routing: see s_routing in audio_ops, except this version is for video
- devices.
-
- s_dv_timings(): Set custom dv timings in the sub device. This is used
- when sub device is capable of setting detailed timing information
- in the hardware to generate/detect the video signal.
-
- g_dv_timings(): Get custom dv timings in the sub device.
-
- g_mbus_config: get supported mediabus configurations
-
- s_mbus_config: set a certain mediabus configuration. This operation is added
- for compatibility with soc-camera drivers and should not be used by new
- software.
-
- s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
- can adjust @size to a lower value and must not write more data to the
- buffer starting at @data than the original value of @size.
+/**
+ * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
+ * in video mode.
+ *
+ * @s_routing: see s_routing in audio_ops, except this version is for video
+ * devices.
+ *
+ * @s_crystal_freq: sets the frequency of the crystal used to generate the
+ * clocks in Hz. An extra flags field allows device specific configuration
+ * regarding clock frequency dividers, etc. If not used, then set flags
+ * to 0. If the frequency is not supported, then -EINVAL is returned.
+ *
+ * @g_std: callback for VIDIOC_G_STD ioctl handler code.
+ *
+ * @s_std: callback for VIDIOC_S_STD ioctl handler code.
+ *
+ * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
+ * video input devices.
+ *
+ * @g_std_output: get current standard for video OUTPUT devices. This is ignored
+ * by video input devices.
+ *
+ * @querystd: callback for VIDIOC_QUERYSTD ioctl handler code.
+ *
+ * @g_tvnorms: get v4l2_std_id with all standards supported by the video
+ * CAPTURE device. This is ignored by video output devices.
+ *
+ * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video
+ * OUTPUT device. This is ignored by video capture devices.
+ *
+ * @g_input_status: get input status. Same as the status field in the v4l2_input
+ * struct.
+ *
+ * @s_stream: used to notify the driver that a video stream will start or has
+ * stopped.
+ *
+ * @cropcap: callback for VIDIOC_CROPCAP ioctl handler code.
+ *
+ * @g_crop: callback for VIDIOC_G_CROP ioctl handler code.
+ *
+ * @s_crop: callback for VIDIOC_S_CROP ioctl handler code.
+ *
+ * @g_parm: callback for VIDIOC_G_PARM ioctl handler code.
+ *
+ * @s_parm: callback for VIDIOC_S_PARM ioctl handler code.
+ *
+ * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL ioctl handler code.
+ *
+ * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL ioctl handler code.
+ *
+ * @s_dv_timings: Set custom dv timings in the sub device. This is used
+ * when sub device is capable of setting detailed timing information
+ * in the hardware to generate/detect the video signal.
+ *
+ * @g_dv_timings: Get custom dv timings in the sub device.
+ *
+ * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS ioctl handler code.
+ *
+ * @g_mbus_config: get supported mediabus configurations
+ *
+ * @s_mbus_config: set a certain mediabus configuration. This operation is added
+ * for compatibility with soc-camera drivers and should not be used by new
+ * software.
+ *
+ * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
+ * can adjust @size to a lower value and must not write more data to the
+ * buffer starting at @data than the original value of @size.
*/
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -340,34 +423,39 @@ struct v4l2_subdev_video_ops {
unsigned int *size);
};
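Again purely illustrative (not part of this patch): a hedged sketch of how a bridge driver typically drives the @s_stream callback documented above, telling the sub-device when streaming starts or stops. The helper name is hypothetical.

/* Hypothetical helper; sketch only. */
#include <media/v4l2-subdev.h>

static int demo_bridge_set_streaming(struct v4l2_subdev *sd, bool on)
{
	return v4l2_subdev_call(sd, video, s_stream, on ? 1 : 0);
}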
-/*
- decode_vbi_line: video decoders that support sliced VBI need to implement
- this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
- start of the VBI data that was generated by the decoder. The driver
- then parses the sliced VBI data and sets the other fields in the
- struct accordingly. The pointer p is updated to point to the start of
- the payload which can be copied verbatim into the data field of the
- v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
- type field is set to 0 on return.
-
- s_vbi_data: used to generate VBI signals on a video signal.
- v4l2_sliced_vbi_data is filled with the data packets that should be
- output. Note that if you set the line field to 0, then that VBI signal
- is disabled. If no valid VBI data was found, then the type field is
- set to 0 on return.
-
- g_vbi_data: used to obtain the sliced VBI packet from a readback register.
- Not all video decoders support this. If no data is available because
- the readback register contains invalid or erroneous data -EIO is
- returned. Note that you must fill in the 'id' member and the 'field'
- member (to determine whether CC data from the first or second field
- should be obtained).
-
- s_raw_fmt: setup the video encoder/decoder for raw VBI.
-
- g_sliced_fmt: retrieve the current sliced VBI settings.
-
- s_sliced_fmt: setup the sliced VBI settings.
+/**
+ * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened
+ * in video mode via the vbi device node.
+ *
+ * @decode_vbi_line: video decoders that support sliced VBI need to implement
+ * this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
+ * start of the VBI data that was generated by the decoder. The driver
+ * then parses the sliced VBI data and sets the other fields in the
+ * struct accordingly. The pointer p is updated to point to the start of
+ * the payload which can be copied verbatim into the data field of the
+ * v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
+ * type field is set to 0 on return.
+ *
+ * @s_vbi_data: used to generate VBI signals on a video signal.
+ * v4l2_sliced_vbi_data is filled with the data packets that should be
+ * output. Note that if you set the line field to 0, then that VBI signal
+ * is disabled. If no valid VBI data was found, then the type field is
+ * set to 0 on return.
+ *
+ * @g_vbi_data: used to obtain the sliced VBI packet from a readback register.
+ * Not all video decoders support this. If no data is available because
+ * the readback register contains invalid or erroneous data -EIO is
+ * returned. Note that you must fill in the 'id' member and the 'field'
+ * member (to determine whether CC data from the first or second field
+ * should be obtained).
+ *
+ * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP ioctl handler code.
+ *
+ * @s_raw_fmt: setup the video encoder/decoder for raw VBI.
+ *
+ * @g_sliced_fmt: retrieve the current sliced VBI settings.
+ *
+ * @s_sliced_fmt: setup the sliced VBI settings.
*/
struct v4l2_subdev_vbi_ops {
int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
@@ -480,8 +568,39 @@ struct v4l2_subdev_pad_config {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
+ *
+ * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
+ * code.
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
+ * code.
+ *
+ * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl
+ * handler code.
+ *
+ * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT ioctl handler code.
+ *
+ * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT ioctl handler code.
+ *
+ * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION ioctl handler code.
+ *
+ * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION ioctl handler code.
+ *
+ * @get_edid: callback for VIDIOC_SUBDEV_G_EDID ioctl handler code.
+ *
+ * @set_edid: callback for VIDIOC_SUBDEV_S_EDID ioctl handler code.
+ *
+ * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler
+ * code.
+ *
+ * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler
+ * code.
+ *
+ * @link_validate: used by the media controller code to check if the links
+ * that belong to a pipeline can be used for streaming.
+ *
* @get_frame_desc: get the current low level media bus frame parameters.
- * @get_frame_desc: set the low level media bus frame parameters, @fd array
+ *
+ * @set_frame_desc: set the low level media bus frame parameters, @fd array
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
@@ -695,4 +814,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd,
#define v4l2_subdev_has_op(sd, o, f) \
((sd)->ops->o && (sd)->ops->o->f)
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+ const struct v4l2_event *ev);
+
#endif
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 8c6e82594..d760aa73e 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -37,7 +37,7 @@ struct videobuf_queue;
*
* about the mmap helpers (videobuf_mmap_*):
*
- * The mmaper function allows to map any subset of contingous buffers.
+ * The mmap helper functions allow mapping any subset of contiguous buffers.
* This includes one mmap() call for all buffers (which the original
* video4linux API uses) as well as one mmap() for every single buffer
* (which v4l2 uses).
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index c192e1b46..589b56c68 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -364,7 +364,9 @@ struct v4l2_fh;
* start_streaming() can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
- *
+ */
+/*
+ * Private elements (won't appear in the DocBook):
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
@@ -407,7 +409,7 @@ struct vb2_queue {
gfp_t gfp_flags;
u32 min_buffers_needed;
-/* private: internal use only */
+ /* private: internal use only */
struct mutex mmap_lock;
enum v4l2_memory memory;
struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
@@ -484,7 +486,8 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblock);
-/**
+
+/*
* vb2_thread_fnc - callback function for use with vb2_thread
*
* This is called whenever a buffer is dequeued in the thread.
@@ -577,7 +580,6 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
* vb2_get_plane_payload() - get bytesused for the plane plane_no
* @vb: buffer for which plane payload should be set
* @plane_no: plane number for which payload should be set
- * @size: payload in bytes
*/
static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no)
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index f05444ca8..6513c7ec3 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -15,9 +15,11 @@
#define _MEDIA_VIDEOBUF2_MEMOPS_H
#include <media/videobuf2-core.h>
+#include <linux/mm.h>
/**
- * vb2_vmarea_handler - common vma refcount tracking handler
+ * struct vb2_vmarea_handler - common vma refcount tracking handler
+ *
* @refcount: pointer to refcount entry in the buffer
* @put: callback to function that decreases buffer refcount
* @arg: argument for @put callback
@@ -30,11 +32,9 @@ struct vb2_vmarea_handler {
extern const struct vm_operations_struct vb2_common_vm_ops;
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
- struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
-void vb2_put_vma(struct vm_area_struct *vma);
-
+struct frame_vector *vb2_create_framevec(unsigned long start,
+ unsigned long length,
+ bool write);
+void vb2_destroy_framevec(struct frame_vector *vec);
#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6cc..f2ffe5bd7 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@ unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
+/*
+ * For EEH, a driver may want to assert that a PERST will reload the same image
+ * from flash into the FPGA.
+ *
+ * This is a property of the entire adapter, not a single AFU, so drivers
+ * should set this property with care!
+ */
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+ bool perst_reloads_same_image);
+
#endif /* _MISC_CXL_H */
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index dc03d77ad..a2f59ec98 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -197,6 +197,27 @@
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
+#define LOWPAN_PRIV_SIZE(llpriv_size) \
+ (sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+ LOWPAN_LLTYPE_BTLE,
+ LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+ enum lowpan_lltypes lltype;
+
+ /* must be last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
return skb->len + uncomp_header - ret;
}
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
int
lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
const u8 *saddr, const u8 saddr_type,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738bc5..9d446f136 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@ struct tcf_common {
struct gnet_stats_rate_est64 tcfc_rate_est;
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
};
#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
kfree(hf->htab);
}
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+ unsigned long now = jiffies;
+
+ if (tm->lastuse != now)
+ tm->lastuse = now;
+}
+
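As a hedged usage sketch (not part of this patch): an action implementation would call the new tcf_lastuse_update() helper from its per-packet path. The demo_act structure and its tm member below are hypothetical; only the struct tcf_t field matters.

/* Sketch only; struct demo_act and its tm member are hypothetical. */
#include <linux/pkt_cls.h>
#include <net/act_api.h>

struct demo_act {
	struct tcf_t tm;	/* install/lastuse timestamps */
};

static void demo_act_packet_seen(struct demo_act *a)
{
	/* Only writes tm->lastuse when jiffies has actually moved on. */
	tcf_lastuse_update(&a->tm);
}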
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
@@ -98,11 +111,10 @@ struct tc_action_ops {
};
int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tc_action *a);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
int tcf_hash_check(u32 index, struct tc_action *a, int bind);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind);
+ int size, int bind, bool cpustats);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_hash_insert(struct tc_action *a);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3a3..b5474b1fc 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -91,6 +91,37 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
+static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
+{
+ if (dev->addr_len != ETH_ALEN)
+ return -1;
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+
+ /*
+ * The zSeries OSA network cards can be shared among various
+ * OS instances, but the OSA cards have only one MAC address.
+ * This leads to duplicate address conflicts in conjunction
+ * with IPv6 if more than one instance uses the same card.
+ *
+ * The driver for these cards can deliver a unique 16-bit
+ * identifier for each instance sharing the same card. It is
+ * placed instead of 0xFFFE in the interface identifier. The
+ * "u" bit of the interface identifier is not inverted in this
+ * case. Hence the resulting interface identifier has local
+ * scope according to RFC2373.
+ */
+ if (dev->dev_id) {
+ eui[3] = (dev->dev_id >> 8) & 0xFF;
+ eui[4] = dev->dev_id & 0xFF;
+ } else {
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ }
+ return 0;
+}
+
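For readers unfamiliar with the EUI-48 to EUI-64 derivation above, here is a stand-alone user-space demo (not kernel code, purely illustrative) that mirrors the plain-Ethernet case with dev_id == 0: insert FFFE in the middle and invert the "u" bit.

/* Stand-alone demo mirroring addrconf_ifid_eui48() for dev_id == 0. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void eui48_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);
	eui[3] = 0xFF;		/* no dev_id: insert FFFE ... */
	eui[4] = 0xFE;
	eui[0] ^= 2;		/* ... and invert the "u" bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t eui[8];
	int i;

	eui48_to_eui64(eui, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
	return 0;	/* prints 00:11:22:ff:fe:33:44:55 */
}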
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
@@ -158,8 +189,8 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
- struct flowi6 *fl6);
+ int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+ struct dst_entry **dst, struct flowi6 *fl6);
void (*udpv6_encap_enable)(void);
void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *daddr,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index cb1b9bbda..b36d837c7 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -64,7 +64,7 @@ struct unix_sock {
struct socket_wq peer_wq;
};
-static inline struct unix_sock *unix_sk(struct sock *sk)
+static inline struct unix_sock *unix_sk(const struct sock *sk)
{
return (struct unix_sock *)sk;
}
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d3e..544a0201a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -469,6 +469,7 @@ struct hci_conn {
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
struct delayed_work le_conn_timeout;
+ struct work_struct le_scan_cleanup;
struct device dev;
struct dentry *debugfs;
@@ -512,9 +513,11 @@ struct hci_conn_params {
HCI_AUTO_CONN_DIRECT,
HCI_AUTO_CONN_ALWAYS,
HCI_AUTO_CONN_LINK_LOSS,
+ HCI_AUTO_CONN_EXPLICIT,
} auto_connect;
struct hci_conn *conn;
+ bool explicit_connect;
};
extern struct list_head hci_dev_list;
@@ -639,6 +642,7 @@ enum {
HCI_CONN_DROP,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
+ HCI_CONN_SCANNING,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +812,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +847,9 @@ void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role);
@@ -988,6 +1015,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type);
void hci_uuids_clear(struct hci_dev *hdev);
@@ -1297,7 +1327,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
if (max >= to_multiplier * 8)
return -EINVAL;
- max_latency = (to_multiplier * 8 / max) - 1;
+ max_latency = (to_multiplier * 4 / max) - 1;
if (latency > 499 || latency > max_latency)
return -EINVAL;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a3753..c98afc08c 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
#define L2CAP_A2MP_DEFAULT_MTU 670
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca253..1797235cd 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@ enum {
BOND_OPT_AD_ACTOR_SYS_PRIO,
BOND_OPT_AD_ACTOR_SYSTEM,
BOND_OPT_AD_USER_PORT_KEY,
+ BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 20defc035..c1740a279 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -310,6 +310,13 @@ static inline bool bond_uses_primary(struct bonding *bond)
return bond_mode_uses_primary(BOND_MODE(bond));
}
+static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+ return bond_uses_primary(bond) && slave ? slave->dev : NULL;
+}
+
static inline bool bond_slave_is_up(struct slave *slave)
{
return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 883fe1e7c..f0889a247 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2369,8 +2369,7 @@ struct cfg80211_qos_map {
* method returns 0.)
*
* @mgmt_frame_register: Notify driver that a management frame type was
- * registered. Note that this callback may not sleep, and cannot run
- * concurrently with itself.
+ * registered. The callback is allowed to sleep.
*
* @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
* Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a69a..76b1ffaea 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@ struct cfg802154_ops {
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
+ int (*suspend)(struct wpan_phy *wpan_phy);
+ int (*resume)(struct wpan_phy *wpan_phy);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
unsigned char name_assign_type,
@@ -61,6 +63,8 @@ struct cfg802154_ops {
s8 max_frame_retries);
int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, bool mode);
+ int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq);
};
static inline bool
@@ -171,6 +175,9 @@ struct wpan_dev {
struct list_head list;
struct net_device *netdev;
+ /* lowpan interface, set when the wpan_dev belongs to a lowpan_dev */
+ struct net_device *lowpan_dev;
+
u32 identifier;
/* MAC PIB */
@@ -191,6 +198,9 @@ struct wpan_dev {
bool lbt;
bool promiscuous_mode;
+
+ /* fallback for acknowledgment bit setting */
+ bool ackreq;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 2d1d73cb7..9fcaedf99 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
+ __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
- int pseudohdr);
+ bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr);
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
- int pseudohdr)
+ bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d39456..ccd6d8bff 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
if (classid != sk->sk_classid)
sk->sk_classid = classid;
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ u32 classid = task_cls_state(current)->classid;
+
+ /* Due to the nature of the classifier it is required to ignore all
+ * packets originating from softirq context as accessing `current'
+ * would lead to false results.
+ *
+ * This test assumes that all callers of dev_queue_xmit() explicitly
+ * disable bh. Knowing this, it is possible to detect softirq based
+ * calls by looking at the number of nested bh disable calls because
+ * softirqs always disable bh.
+ */
+ if (in_serving_softirq()) {
+ /* If there is an sk_classid we'll use that. */
+ if (!skb->sk)
+ return 0;
+
+ classid = skb->sk->sk_classid;
+ }
+
+ return classid;
+}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock *sk)
{
}
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+ return 0;
+}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
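A hedged sketch of the intended call site for the new task_get_classid() helper: a cls_cgroup-style classify path reads the classid once and bails out for traffic it cannot attribute. This is illustrative, not the actual net/sched/cls_cgroup.c code.

/* Illustrative classify path; not the real cls_cgroup implementation. */
#include <linux/skbuff.h>
#include <net/cls_cgroup.h>

static int demo_classify(struct sk_buff *skb, u32 *res_classid)
{
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;	/* no match */

	*res_classid = classid;
	return 0;
}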
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63ba8..b34d812bc 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+ return !!((ds->dsa_port_mask) & (1 << p));
+}
+
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@ struct dsa_switch_driver {
u32 br_port_mask);
int (*port_stp_update)(struct dsa_switch *ds, int port,
u8 state);
- int (*fdb_add)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_del)(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
- int (*fdb_getnext)(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static);
+
+ /*
+ * VLAN support
+ */
+ int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
+ int (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
+ int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+ bool untagged);
+ int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+ int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
+ unsigned long *ports, unsigned long *untagged);
+
+ /*
+ * Forwarding database
+ */
+ int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*port_fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, u16 *vid,
+ bool *is_static);
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a0..9261d9283 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@ struct dst_entry {
#define DST_FAKE_RTABLE 0x0040
#define DST_XFRM_TUNNEL 0x0080
#define DST_XFRM_QUEUE 0x0100
+#define DST_METADATA 0x0200
unsigned short pending_confirm;
@@ -83,12 +84,13 @@ struct dst_entry {
__u32 __pad2;
#endif
+#ifdef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
-#ifdef CONFIG_64BIT
- long __pad_to_align_refcnt[2];
+ long __pad_to_align_refcnt[1];
#endif
/*
* __refcnt wants to be on a different cache line from
@@ -97,6 +99,9 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+#ifndef CONFIG_64BIT
+ struct lwtunnel_state *lwtstate;
+#endif
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
@@ -202,6 +207,12 @@ static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
p[metric-1] = val;
}
+/* Kernel-internal feature bits that are unallocated in user space. */
+#define DST_FEATURE_ECN_CA (1 << 31)
+
+#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
+#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
+
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
@@ -284,13 +295,18 @@ static inline void skb_dst_drop(struct sk_buff *skb)
}
}
-static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
- nskb->_skb_refdst = oskb->_skb_refdst;
+ nskb->_skb_refdst = refdst;
if (!(nskb->_skb_refdst & SKB_DST_NOREF))
dst_clone(skb_dst(nskb));
}
+static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+{
+ __skb_dst_copy(nskb, oskb->_skb_refdst);
+}
+
/**
* skb_dst_force - makes sure skb dst is refcounted
* @skb: buffer
@@ -356,6 +372,9 @@ static inline int dst_discard(struct sk_buff *skb)
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);
@@ -457,7 +476,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
-void dst_init(void);
+void dst_subsys_init(void);
/* Flags for xfrm_lookup flags argument. */
enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 000000000..6816f0fa5
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,141 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+ struct dst_entry dst;
+ union {
+ struct ip_tunnel_info tun_info;
+ } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+ if (md_dst && md_dst->dst.flags & DST_METADATA)
+ return md_dst;
+
+ return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst)
+ return &md_dst->u.tun_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate)
+ return lwt_tun_info(dst->lwtstate);
+
+ return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+static inline struct metadata_dst *tun_rx_dst(int md_size)
+{
+ struct metadata_dst *tun_dst;
+
+ tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
+ if (!tun_dst)
+ return NULL;
+
+ tun_dst->u.tun_info.options_len = 0;
+ tun_dst->u.tun_info.mode = 0;
+ return tun_dst;
+}
+
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ int md_size;
+ struct metadata_dst *new_md;
+
+ if (!md_dst)
+ return ERR_PTR(-EINVAL);
+
+ md_size = md_dst->u.tun_info.options_len;
+ new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+ if (!new_md)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+ sizeof(struct ip_tunnel_info) + md_size);
+ skb_dst_drop(skb);
+ dst_hold(&new_md->dst);
+ skb_dst_set(skb, &new_md->dst);
+ return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+ struct metadata_dst *dst;
+
+ dst = tun_dst_unclone(skb);
+ if (IS_ERR(dst))
+ return NULL;
+
+ return &dst->u.tun_info;
+}
+
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct metadata_dst *tun_dst;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ 0, 0, tunnel_id, flags);
+ return tun_dst;
+}
+
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *info;
+
+ tun_dst = tun_rx_dst(md_size);
+ if (!tun_dst)
+ return NULL;
+
+ info = &tun_dst->u.tun_info;
+ info->mode = IP_TUNNEL_INFO_IPV6;
+ info->key.tun_flags = flags;
+ info->key.tun_id = tunnel_id;
+ info->key.tp_src = 0;
+ info->key.tp_dst = 0;
+
+ info->key.u.ipv6.src = ip6h->saddr;
+ info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.tos = ipv6_get_dsfield(ip6h);
+ info->key.ttl = ip6h->hop_limit;
+ return tun_dst;
+}
+
+#endif /* __NET_DST_METADATA_H */
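To show how the new header is meant to be consumed, a hedged receive-path sketch (not part of this patch): a collect-metadata tunnel driver builds a metadata dst from the outer IPv4 header and attaches it to the skb before handing the packet up the stack. The function name is hypothetical.

/* Hypothetical receive-path usage of the new helpers; sketch only. */
#include <linux/errno.h>
#include <net/dst_metadata.h>

static int demo_tunnel_rx(struct sk_buff *skb, __be16 flags, __be64 tun_id)
{
	struct metadata_dst *tun_dst;

	tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
	if (!tun_dst)
		return -ENOMEM;

	skb_dst_set(skb, &tun_dst->dst);	/* skb now carries DST_METADATA */
	return 0;
}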
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55efb..59160de70 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@ struct fib_rule {
u8 action;
/* 3 bytes hole, try to use */
u32 target;
+ __be64 tun_id;
struct fib_rule __rcu *ctarget;
struct net *fr_net;
@@ -65,7 +66,6 @@ struct fib_rules_ops {
struct nlattr **);
int (*fill)(struct fib_rule *, struct sk_buff *,
struct fib_rule_hdr *);
- u32 (*default_pref)(struct fib_rules_ops *ops);
size_t (*nlmsg_payload)(struct fib_rule *);
/* Called after modifications to the rules set, must flush
@@ -117,5 +117,4 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a159d..9b85db85f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,7 @@
#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
+#include <net/flow_dissector.h>
/*
* ifindex generation is per-net namespace, and loopback is
@@ -19,6 +20,10 @@
#define LOOPBACK_IFINDEX 1
+struct flowi_tunnel {
+ __be64 tun_id;
+};
+
struct flowi_common {
int flowic_oif;
int flowic_iif;
@@ -29,7 +34,10 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
+#define FLOWI_FLAG_VRFSRC 0x04
+#define FLOWI_FLAG_SKIP_NH_OIF 0x08
__u32 flowic_secid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
@@ -66,6 +74,7 @@ struct flowi4 {
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
+#define flowi4_tun_key __fl_common.flowic_tun_key
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
@@ -95,6 +104,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
fl4->flowi4_secid = 0;
+ fl4->flowi4_tun_key.tun_id = 0;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
@@ -122,6 +132,7 @@ struct flowi6 {
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
#define flowi6_secid __fl_common.flowic_secid
+#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
__be32 flowlabel;
@@ -165,6 +176,7 @@ struct flowi {
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
#define flowi_secid u.__fl_common.flowic_secid
+#define flowi_tun_key u.__fl_common.flowic_tun_key
} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -233,4 +245,22 @@ void flow_cache_flush(struct net *net);
void flow_cache_flush_deferred(struct net *net);
extern atomic_t flow_cache_genid;
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
+{
+ struct flow_keys keys;
+
+ return __get_hash_from_flowi6(fl6, &keys);
+}
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
+{
+ struct flow_keys keys;
+
+ return __get_hash_from_flowi4(fl4, &keys);
+}
+
#endif
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a8c22419..8c8548cf5 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -2,7 +2,6 @@
#define _NET_FLOW_DISSECTOR_H
#include <linux/types.h>
-#include <linux/skbuff.h>
#include <linux/in6.h>
#include <uapi/linux/if_ether.h>
@@ -13,8 +12,13 @@
struct flow_dissector_key_control {
u16 thoff;
u16 addr_type;
+ u32 flags;
};
+#define FLOW_DIS_IS_FRAGMENT BIT(0)
+#define FLOW_DIS_FIRST_FRAG BIT(1)
+#define FLOW_DIS_ENCAPSULATION BIT(2)
+
/**
* struct flow_dissector_key_basic:
* @thoff: Transport header offset
@@ -123,6 +127,11 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MAX,
};
+#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
+#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1)
+#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2)
+#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3)
+
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +143,6 @@ struct flow_dissector {
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
-void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
- const struct flow_dissector_key *key,
- unsigned int key_count);
-
-bool __skb_flow_dissect(const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- void *target_container,
- void *data, __be16 proto, int nhoff, int hlen);
-
-static inline bool skb_flow_dissect(const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- void *target_container)
-{
- return __skb_flow_dissect(skb, flow_dissector, target_container,
- NULL, 0, 0, 0);
-}
-
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +162,6 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow);
extern struct flow_dissector flow_keys_dissector;
extern struct flow_dissector flow_keys_buf_dissector;
-static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
- struct flow_keys *flow)
-{
- memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
- NULL, 0, 0, 0);
-}
-
-static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
- void *data, __be16 proto,
- int nhoff, int hlen)
-{
- memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
- data, proto, nhoff, hlen);
-}
-
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
-
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
- int thoff, u8 ip_proto)
-{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-
-u32 flow_hash_from_keys(struct flow_keys *keys);
-void __skb_get_hash(struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
-
/* struct flow_keys_digest:
*
* This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +177,11 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
+static inline bool flow_keys_have_l4(struct flow_keys *keys)
+{
+ return (keys->ports.ports || keys->tags.flow_label);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+
#endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 2a0543a18..3106ed6ea 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,40 +62,9 @@ struct genevehdr {
struct geneve_opt options[];
};
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
- return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
#ifdef CONFIG_INET
-struct geneve_sock;
-
-typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
-
-struct geneve_sock {
- struct list_head list;
- geneve_rcv_t *rcv;
- void *rcv_data;
- struct socket *sock;
- struct rcu_head rcu;
- int refcnt;
- struct udp_offload udp_offloads;
-};
-
-#define GENEVE_VER 0
-#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool no_share, bool ipv6);
-
-void geneve_sock_release(struct geneve_sock *vs);
-
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
- __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum, bool xnet);
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+ u8 name_assign_type, u16 dst_port);
#endif /*ifdef CONFIG_INET */
#endif /*ifdef__NET_GENEVE_H */
diff --git a/include/net/gre.h b/include/net/gre.h
index b53182018..97eafdc47 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
#define GREPROTO_CISCO 0
#define GREPROTO_PPTP 1
#define GREPROTO_MAX 2
@@ -14,91 +20,9 @@ struct gre_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
};
-struct gre_base_hdr {
- __be16 flags;
- __be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
int gre_add_protocol(const struct gre_protocol *proto, u8 version);
int gre_del_protocol(const struct gre_protocol *proto, u8 version);
-struct gre_cisco_protocol {
- int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
- int (*err_handler)(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi);
- u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
- bool csum)
-{
- return iptunnel_handle_offloads(skb, csum,
- csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
- int addend = 4;
-
- if (o_flags&TUNNEL_CSUM)
- addend += 4;
- if (o_flags&TUNNEL_KEY)
- addend += 4;
- if (o_flags&TUNNEL_SEQ)
- addend += 4;
- return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
- __be16 tflags = 0;
-
- if (flags & GRE_CSUM)
- tflags |= TUNNEL_CSUM;
- if (flags & GRE_ROUTING)
- tflags |= TUNNEL_ROUTING;
- if (flags & GRE_KEY)
- tflags |= TUNNEL_KEY;
- if (flags & GRE_SEQ)
- tflags |= TUNNEL_SEQ;
- if (flags & GRE_STRICT)
- tflags |= TUNNEL_STRICT;
- if (flags & GRE_REC)
- tflags |= TUNNEL_REC;
- if (flags & GRE_VERSION)
- tflags |= TUNNEL_VERSION;
-
- return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
- __be16 flags = 0;
-
- if (tflags & TUNNEL_CSUM)
- flags |= GRE_CSUM;
- if (tflags & TUNNEL_ROUTING)
- flags |= GRE_ROUTING;
- if (tflags & TUNNEL_KEY)
- flags |= GRE_KEY;
- if (tflags & TUNNEL_SEQ)
- flags |= GRE_SEQ;
- if (tflags & TUNNEL_STRICT)
- flags |= GRE_STRICT;
- if (tflags & TUNNEL_REC)
- flags |= GRE_REC;
- if (tflags & TUNNEL_VERSION)
- flags |= GRE_VERSION;
-
- return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type);
#endif
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 0f712c0bc..cf6c74550 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -32,37 +32,28 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
return;
}
- /* We run in BH context */
- spin_lock(&cell->napi_skbs.lock);
-
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
-
- spin_unlock(&cell->napi_skbs.lock);
}
-/* called unser BH context */
+/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
struct sk_buff *skb;
int work_done = 0;
- spin_lock(&cell->napi_skbs.lock);
while (work_done < budget) {
skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
break;
- spin_unlock(&cell->napi_skbs.lock);
napi_gro_receive(napi, skb);
work_done++;
- spin_lock(&cell->napi_skbs.lock);
}
if (work_done < budget)
- napi_complete(napi);
- spin_unlock(&cell->napi_skbs.lock);
+ napi_complete_done(napi, work_done);
return work_done;
}
@@ -77,7 +68,7 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
- skb_queue_head_init(&cell->napi_skbs);
+ __skb_queue_head_init(&cell->napi_skbs);
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
napi_enable(&cell->napi);
}
@@ -92,8 +83,9 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
return;
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
netif_napi_del(&cell->napi);
- skb_queue_purge(&cell->napi_skbs);
+ __skb_queue_purge(&cell->napi_skbs);
}
free_percpu(gcells->cells);
gcells->cells = NULL;
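For context, a hedged sketch of the usual gro_cells life cycle in a tunnel driver: init at device setup, feed packets from the datapath (in BH context, which is why the locking above can go away), destroy on teardown. The demo_priv structure and function names are hypothetical.

/* Sketch only; struct demo_priv is hypothetical. */
#include <net/gro_cells.h>

struct demo_priv {
	struct gro_cells gro_cells;
};

static int demo_dev_init(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	return gro_cells_init(&priv->gro_cells, dev);
}

static void demo_dev_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct demo_priv *priv = netdev_priv(dev);

	skb->dev = dev;
	gro_cells_receive(&priv->gro_cells, skb);	/* runs in BH context */
}

static void demo_dev_uninit(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}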
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 279f83591..109e3ee91 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
- sock_release(sk->sk_socket);
+ if (sk)
+ sock_release(sk->sk_socket);
}
#endif
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a19..b07d12669 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c48022..fc1937698 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
void inet_twsk_free(struct inet_timewait_sock *tw);
void inet_twsk_put(struct inet_timewait_sock *tw);
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
@@ -112,8 +110,20 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, false);
+}
+
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+{
+ __inet_twsk_schedule(tw, timeo, true);
+}
+
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332ddce..4a6009d44 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,16 +15,20 @@
#include <net/ipv6.h>
#include <linux/atomic.h>
-struct inetpeer_addr_base {
- union {
- __be32 a4;
- __be32 a6[4];
- struct in6_addr in6;
- };
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+ __be32 addr;
+ int vif;
};
+#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
+
struct inetpeer_addr {
- struct inetpeer_addr_base addr;
+ union {
+ struct ipv4_addr_key a4;
+ struct in6_addr a6;
+ u32 key[INETPEER_MAXKEYSZ];
+ };
__u16 family;
};
@@ -65,69 +69,33 @@ struct inet_peer_base {
int total;
};
-#define INETPEER_BASE_BIT 0x1UL
-
-static inline struct inet_peer *inetpeer_ptr(unsigned long val)
-{
- BUG_ON(val & INETPEER_BASE_BIT);
- return (struct inet_peer *) val;
-}
+void inet_peer_base_init(struct inet_peer_base *);
-static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
-{
- if (!(val & INETPEER_BASE_BIT))
- return NULL;
- val &= ~INETPEER_BASE_BIT;
- return (struct inet_peer_base *) val;
-}
+void inet_initpeers(void) __init;
-static inline bool inetpeer_ptr_is_peer(unsigned long val)
-{
- return !(val & INETPEER_BASE_BIT);
-}
+#define INETPEER_METRICS_NEW (~(u32) 0)
-static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
- /* This implicitly clears INETPEER_BASE_BIT */
- *val = (unsigned long) peer;
+ iaddr->a4.addr = ip;
+ iaddr->family = AF_INET;
}
-static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
- unsigned long val = (unsigned long) peer;
- unsigned long orig = *ptr;
-
- if (!(orig & INETPEER_BASE_BIT) ||
- cmpxchg(ptr, orig, val) != orig)
- return false;
- return true;
+ return iaddr->a4.addr;
}
-static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+ struct in6_addr *in6)
{
- *ptr = (unsigned long) base | INETPEER_BASE_BIT;
+ iaddr->a6 = *in6;
+ iaddr->family = AF_INET6;
}
-static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
- unsigned long val = *from;
-
- *to = val;
- if (inetpeer_ptr_is_peer(val)) {
- struct inet_peer *peer = inetpeer_ptr(val);
- atomic_inc(&peer->refcnt);
- }
-}
-
-void inet_peer_base_init(struct inet_peer_base *);
-
-void inet_initpeers(void) __init;
-
-#define INETPEER_METRICS_NEW (~(u32) 0)
-
-static inline bool inet_metrics_new(const struct inet_peer *p)
-{
- return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+ return &iaddr->a6;
}
/* can be called with or without local BH being disabled */
@@ -137,11 +105,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
- int create)
+ int vif, int create)
{
struct inetpeer_addr daddr;
- daddr.addr.a4 = v4daddr;
+ daddr.a4.addr = v4daddr;
+ daddr.a4.vif = vif;
daddr.family = AF_INET;
return inet_getpeer(base, &daddr, create);
}
@@ -152,23 +121,36 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
{
struct inetpeer_addr daddr;
- daddr.addr.in6 = *v6daddr;
+ daddr.a6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(base, &daddr, create);
}
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ int i, n;
+
+ if (a->family == AF_INET)
+ n = sizeof(a->a4) / sizeof(u32);
+ else
+ n = sizeof(a->a6) / sizeof(u32);
+
+ for (i = 0; i < n; i++) {
+ if (a->key[i] == b->key[i])
+ continue;
+ if (a->key[i] < b->key[i])
+ return -1;
+ return 1;
+ }
+
+ return 0;
+}
+
/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
void inetpeer_invalidate_tree(struct inet_peer_base *);
-/*
- * temporary check to make sure we dont access rid, tcp_ts,
- * tcp_ts_stamp if no refcount is taken on inet_peer
- */
-static inline void inet_peer_refcheck(const struct inet_peer *p)
-{
- WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
-}
#endif /* _NET_INETPEER_H */
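A small, purely illustrative sketch of the new key-based inetpeer address helpers: build two IPv4 keys with inetpeer_set_addr_v4() and order them with inetpeer_addr_cmp(), which walks the raw key words (address plus VIF for IPv4).

/* Sketch only; shows the new accessors and comparison helper. */
#include <net/inetpeer.h>

static int demo_inetpeer_v4_order(__be32 ip_a, __be32 ip_b)
{
	struct inetpeer_addr a = {}, b = {};

	inetpeer_set_addr_v4(&a, ip_a);
	inetpeer_set_addr_v4(&b, ip_b);

	/* <0, 0 or >0, like memcmp(), over the key words */
	return inetpeer_addr_cmp(&a, &b);
}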
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2ab..9b9ca2839 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+ size_t syncp_offset)
+{
+ return snmp_get_cpu_field(mib, cpu, offct);
+
+}
+
static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
@@ -370,22 +380,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline void inet_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct flow_keys keys;
-
- memset(&keys, 0, sizeof(keys));
-
- keys.addrs.v4addrs.src = inet->inet_saddr;
- keys.addrs.v4addrs.dst = inet->inet_daddr;
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
-
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
@@ -474,6 +468,11 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+ return (__force unsigned int) ip;
+}
+
bool ip_call_ra_chain(struct sk_buff *skb);
/*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849c1..aaf9700fc 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@ struct fib6_config {
struct nlattr *fc_mp;
struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
};
struct fib6_node {
@@ -273,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
struct nl_info *info, struct mx6_config *mxc);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
-void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
+ unsigned int flags);
void fib6_run_gc(unsigned long expires, struct net *net, bool force);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa1d..fa915fa0f 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -32,6 +32,12 @@ struct __ip6_tnl_parm {
__be32 o_key;
};
+struct ip6_tnl_dst {
+ seqlock_t lock;
+ struct dst_entry __rcu *dst;
+ u32 cookie;
+};
+
/* IPv6 tunnel */
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -39,8 +45,7 @@ struct ip6_tnl {
struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
- struct dst_entry *dst_cache; /* cached dst */
- u32 dst_cookie;
+ struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */
int err_count;
unsigned long err_time;
@@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim {
__u8 encap_limit; /* tunnel encapsulation limit */
} __packed;
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
+int ip6_tnl_dst_init(struct ip6_tnl *t);
+void ip6_tnl_dst_destroy(struct ip6_tnl *t);
void ip6_tnl_dst_reset(struct ip6_tnl *t);
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
@@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
- pkt_len = skb->len;
+ pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out_sk(sk, skb);
if (net_xmit_eval(err) == 0) {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b4e..965fa5b1a 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@ struct fib_config {
u32 fc_flow;
u32 fc_nlflags;
struct nl_info fc_nlinfo;
- };
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+};
struct fib_info;
struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
struct rtable __rcu * __percpu *nh_pcpu_rth_output;
struct rtable __rcu *nh_rth_input;
struct fnhe_hash_bucket __rcu *nh_exceptions;
+ struct lwtunnel_state *nh_lwtstate;
};
/*
@@ -233,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
rcu_read_lock();
tb = fib_get_table(net, RT_TABLE_MAIN);
- if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
- err = 0;
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
+
+ if (err == -EAGAIN)
+ err = -ENETUNREACH;
rcu_read_unlock();
@@ -255,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
struct fib_result *res, unsigned int flags)
{
struct fib_table *tb;
- int err;
+ int err = -ENETUNREACH;
flags |= FIB_LOOKUP_NOREF;
if (net->ipv4.fib_has_custom_rules)
@@ -265,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
res->tclassid = 0;
- for (err = 0; !err; err = -ENETUNREACH) {
- tb = rcu_dereference_rtnl(net->ipv4.fib_main);
- if (tb && !fib_table_lookup(tb, flp, res, flags))
- break;
+ tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags);
+
+ if (!err)
+ goto out;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+ if (tb)
+ err = fib_table_lookup(tb, flp, res, flags);
- tb = rcu_dereference_rtnl(net->ipv4.fib_default);
- if (tb && !fib_table_lookup(tb, flp, res, flags))
- break;
- }
+out:
+ if (err == -EAGAIN)
+ err = -ENETUNREACH;
rcu_read_unlock();
@@ -306,7 +317,7 @@ void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
-int fib_sync_down_dev(struct net_device *dev, unsigned long event);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net *net, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
void fib_select_multipath(struct fib_result *res);
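
With the reworked fib_lookup() above, a failed lookup is reported through the return value (-EAGAIN from the table is normalized to -ENETUNREACH) instead of being folded into 0/non-0 by the caller. A hedged sketch of a caller; the routine name and the RTN_UNICAST policy check are illustrative:

static int example_can_route(struct net *net, struct flowi4 *fl4)
{
	struct fib_result res;
	int err;

	err = fib_lookup(net, fl4, &res, 0);
	if (err)
		return err;	/* failures now arrive as -ENETUNREACH */

	return res.type == RTN_UNICAST ? 0 : -EINVAL;
}
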
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb88..f6dafec91 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -4,14 +4,15 @@
#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
-#include <net/ip.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -22,6 +23,44 @@
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
+ (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
+ FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+
+struct ip_tunnel_key {
+ __be64 tun_id;
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ipv4;
+ struct {
+ struct in6_addr src;
+ struct in6_addr dst;
+ } ipv6;
+ } u;
+ __be16 tun_flags;
+ u8 tos; /* TOS for IPv4, TC for IPv6 */
+ u8 ttl; /* TTL for IPv4, HL for IPv6 */
+ __be16 tp_src;
+ __be16 tp_dst;
+};
+
+/* Flags for ip_tunnel_info mode. */
+#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
+#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
+
+struct ip_tunnel_info {
+ struct ip_tunnel_key key;
+ u8 options_len;
+ u8 mode;
+};
+
/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
@@ -33,8 +72,8 @@ struct ip_tunnel_6rd_parm {
#endif
struct ip_tunnel_encap {
- __u16 type;
- __u16 flags;
+ u16 type;
+ u16 flags;
__be16 sport;
__be16 dport;
};
@@ -51,6 +90,8 @@ struct ip_tunnel_dst {
__be32 saddr;
};
+struct metadata_dst;
+
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@@ -62,8 +103,8 @@ struct ip_tunnel {
* arrived */
/* These four fields used only by GRE */
- __u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ u32 i_seqno; /* The last seen seqno */
+ u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
int mlink;
@@ -84,6 +125,7 @@ struct ip_tunnel {
unsigned int prl_count; /* # of entries in PRL */
int ip_tnl_net_id;
struct gro_cells gro_cells;
+ bool collect_md;
};
#define TUNNEL_CSUM __cpu_to_be16(0x01)
@@ -118,6 +160,7 @@ struct tnl_ptk_info {
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+ struct ip_tunnel __rcu *collect_md_tun;
};
struct ip_tunnel_encap_ops {
@@ -136,6 +179,40 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
+static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
+ __be32 saddr, __be32 daddr,
+ u8 tos, u8 ttl,
+ __be16 tp_src, __be16 tp_dst,
+ __be64 tun_id, __be16 tun_flags)
+{
+ key->tun_id = tun_id;
+ key->u.ipv4.src = saddr;
+ key->u.ipv4.dst = daddr;
+ memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
+ 0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+ key->tos = tos;
+ key->ttl = ttl;
+ key->tun_flags = tun_flags;
+
+	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
+	 * the upper tunnel are used.
+	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+ */
+ key->tp_src = tp_src;
+ key->tp_dst = tp_dst;
+
+ /* Clear struct padding. */
+ if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
+ memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
+ 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
+}
+
+static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
+ *tun_info)
+{
+ return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
+}
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +240,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 key);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
- const struct tnl_ptk_info *tpi, bool log_ecn_error);
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -196,8 +274,10 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df, bool xnet);
+ __be32 src, __be32 dst, u8 proto,
+ u8 tos, u8 ttl, __be16 df, bool xnet);
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+ gfp_t flags);
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
int gso_type_mask);
@@ -221,6 +301,57 @@ static inline void iptunnel_xmit_stats(int err,
}
}
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
+{
+ return info + 1;
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+ const struct ip_tunnel_info *info)
+{
+ memcpy(to, info + 1, info->options_len);
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ const void *from, int len)
+{
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->options_len = len;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+ return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+ return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
#endif /* CONFIG_INET */
#endif /* __NET_IP_TUNNELS_H */
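
ip_tunnel_key_init() clears the IPv6 half of the address union and any trailing struct padding so keys can later be compared or hashed as flat memory, and the ip_tunnel_info_opts_*() helpers treat the bytes immediately after struct ip_tunnel_info as the option area. A rough usage sketch; the VXLAN-style port 4789, the TUNNEL_KEY flag and the fake option bytes are illustrative, and the caller is assumed to have allocated room for the options behind the info:

static void example_fill_tx_info(struct ip_tunnel_info *info,
				 __be32 saddr, __be32 daddr, __be64 vni)
{
	u8 opts[4] = { 0x01, 0x02, 0x03, 0x04 };	/* made-up option bytes */

	ip_tunnel_key_init(&info->key, saddr, daddr,
			   0 /* tos */, 64 /* ttl */,
			   0 /* tp_src */, htons(4789) /* tp_dst */,
			   vni, TUNNEL_KEY);
	info->mode = IP_TUNNEL_INFO_TX;

	/* options live directly behind struct ip_tunnel_info */
	ip_tunnel_info_opts_set(info, opts, sizeof(opts));
}
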
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4e3731ee4..9b9ca87a4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -846,6 +846,17 @@ struct ipvs_master_sync_state {
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
+struct ipvs_sync_daemon_cfg {
+ union nf_inet_addr mcast_group;
+ int syncid;
+ u16 sync_maxlen;
+ u16 mcast_port;
+ u8 mcast_af;
+ u8 mcast_ttl;
+ /* multicast interface name */
+ char mcast_ifn[IP_VS_IFNAME_MAXLEN];
+};
+
/* IPVS in network namespace */
struct netns_ipvs {
int gen; /* Generation */
@@ -961,15 +972,10 @@ struct netns_ipvs {
spinlock_t sync_buff_lock;
struct task_struct **backup_threads;
int threads_mask;
- int send_mesg_maxlen;
- int recv_mesg_maxlen;
volatile int sync_state;
- volatile int master_syncid;
- volatile int backup_syncid;
struct mutex sync_mutex;
- /* multicast interface name */
- char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
- char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ struct ipvs_sync_daemon_cfg mcfg; /* Master Configuration */
+ struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */
/* net name space ptr */
struct net *net; /* Needed by timer routines */
 	/* Number of heterogeneous destinations, needed because heterogeneous
@@ -1408,7 +1414,8 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
/* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+ int state);
int stop_sync_thread(struct net *net, int state);
void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
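
start_sync_thread() now receives the whole sync-daemon configuration in struct ipvs_sync_daemon_cfg rather than an interface name plus syncid. A hedged master-side sketch; the interface name, port and syncid are placeholders, and fields left zero (mcast_group, sync_maxlen) are assumed to be filled with defaults by the IPVS core:

static int example_start_master(struct net *net)
{
	struct ipvs_sync_daemon_cfg cfg = {
		.syncid	    = 1,
		.mcast_af   = AF_INET,
		.mcast_ttl  = 1,
		.mcast_port = 8848,		/* placeholder port */
	};

	strlcpy(cfg.mcast_ifn, "eth0", sizeof(cfg.mcast_ifn));

	return start_sync_thread(net, &cfg, IP_VS_STATE_MASTER);
}
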
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb092..711cca428 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct flow_keys keys;
- memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net.ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF 0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
+#define IP6_AUTO_FLOW_LABEL_OPTIN 2
+#define IP6_AUTO_FLOW_LABEL_FORCED 3
- memcpy(&keys.addrs.v6addrs.src, &np->saddr,
- sizeof(keys.addrs.v6addrs.src));
- memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
- sizeof(keys.addrs.v6addrs.dst));
- keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- keys.ports.src = inet->inet_sport;
- keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED
- sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
- if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
- u32 hash;
+ u32 hash;
+
+ if (flowlabel ||
+ net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+ (!autolabel &&
+ net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+ return flowlabel;
- hash = skb_get_hash(skb);
+ hash = skb_get_hash_flowi6(skb, fl6);
- /* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
- * attacker is leaked. Only lower 20 bits are relevant.
- */
- hash ^= hash >> 12;
+	/* Since this is being sent on the wire, obfuscate the hash a bit
+	 * to minimize the possibility that any useful information is
+	 * leaked to an attacker. Only the lower 20 bits are relevant.
+	 */
+	hash = rol32(hash, 16);
- flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
- if (net->ipv6.sysctl.flowlabel_state_ranges)
- flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
- }
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
return flowlabel;
}
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ switch (net->ipv6.sysctl.auto_flowlabels) {
+ case IP6_AUTO_FLOW_LABEL_OFF:
+ case IP6_AUTO_FLOW_LABEL_OPTIN:
+ default:
+ return 0;
+ case IP6_AUTO_FLOW_LABEL_OPTOUT:
+ case IP6_AUTO_FLOW_LABEL_FORCED:
+ return 1;
+ }
+}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
- __be32 flowlabel, bool autolabel)
+ __be32 flowlabel, bool autolabel,
+ struct flowi6 *fl6)
{
return flowlabel;
}
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+ return 0;
+}
#endif
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
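
ip6_make_flowlabel() now receives the flowi6 so that, when the skb carries no hash, one can be computed from the flow itself, and auto_flowlabels is a four-state policy consulted together with the per-socket opt-in/opt-out. A small caller sketch; the wrapper is illustrative and simply seeds the per-socket choice from ip6_default_np_autolabel():

static __be32 example_flowlabel(struct net *net, struct sk_buff *skb,
				struct flowi6 *fl6)
{
	bool autolabel = ip6_default_np_autolabel(net);

	/* returns fl6->flowlabel unchanged unless auto-labeling applies */
	return ip6_make_flowlabel(net, skb, fl6->flowlabel, autolabel, fl6);
}
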
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 000000000..fce0e35e7
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,175 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS 7
+#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+
+struct lwtunnel_state {
+ __u16 type;
+ __u16 flags;
+ atomic_t refcnt;
+ int (*orig_output)(struct sock *sk, struct sk_buff *skb);
+ int (*orig_input)(struct sk_buff *);
+ int len;
+ __u8 data[0];
+};
+
+struct lwtunnel_encap_ops {
+ int (*build_state)(struct net_device *dev, struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts);
+ int (*output)(struct sock *sk, struct sk_buff *skb);
+ int (*input)(struct sk_buff *skb);
+ int (*fill_encap)(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+ kfree(lws);
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ if (lws)
+ atomic_inc(&lws->refcnt);
+
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+ if (!lws)
+ return;
+
+ if (atomic_dec_and_test(&lws->refcnt))
+ lwtstate_free(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+ return true;
+
+ return false;
+}
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
+
+#else
+
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+ return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **lws)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+ return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+ struct lwtunnel_state *b)
+{
+ return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_input(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
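
An encapsulation backend plugs into this framework by filling a struct lwtunnel_encap_ops and registering it for its encap type. A minimal sketch under stated assumptions: MYENCAP_LWT_TYPE stands in for a real LWTUNNEL_ENCAP_* value and the callbacks are stubs, not a working encapsulation:

#define MYENCAP_LWT_TYPE 42	/* stand-in for a real LWTUNNEL_ENCAP_* value */

static int myencap_build_state(struct net_device *dev, struct nlattr *encap,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts)
{
	struct lwtunnel_state *lws = lwtunnel_state_alloc(0);

	if (!lws)
		return -ENOMEM;

	lws->type = MYENCAP_LWT_TYPE;
	lws->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
	*ts = lws;
	return 0;
}

static int myencap_output(struct sock *sk, struct sk_buff *skb)
{
	/* a real backend would push its encap header and resubmit the skb */
	kfree_skb(skb);
	return -EOPNOTSUPP;
}

static const struct lwtunnel_encap_ops myencap_ops = {
	.build_state	= myencap_build_state,
	.output		= myencap_output,
};

/* module init: err = lwtunnel_encap_add_ops(&myencap_ops, MYENCAP_LWT_TYPE); */
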
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 6b1077c2a..bfc569498 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -477,7 +477,9 @@ struct ieee80211_event {
* @chandef: Channel definition for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
- * This field is only valid when the channel type is one of the HT types.
+ * This field is only valid when the channel is a wide HT/VHT channel.
+ * Note that with TDLS this can be the case (channel is HT, protection must
+ * be used from this field) even when the BSS association isn't using HT.
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
@@ -973,6 +975,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
* If this flag is set, the stack cannot do any replay detection
* hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ * flag indicates that the PN was verified for replay protection.
+ *	Note that this flag is currently only supported when the frame
+ *	is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set).
* @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1003,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
* number (@ampdu_reference) must be populated and be a distinct number for
* each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- * monitoring purposes only
* @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
* subframes of a single A-MPDU
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1042,8 @@ enum mac80211_rx_flags {
RX_FLAG_NO_SIGNAL_VAL = BIT(12),
RX_FLAG_HT_GF = BIT(13),
RX_FLAG_AMPDU_DETAILS = BIT(14),
- RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
- RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
+ RX_FLAG_PN_VALIDATED = BIT(15),
+ /* bit 16 free */
RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
RX_FLAG_AMPDU_IS_LAST = BIT(18),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
@@ -1491,8 +1494,10 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
*/
struct ieee80211_key_conf {
+ void *drv_priv;
atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
@@ -1675,7 +1680,6 @@ struct ieee80211_sta_rates {
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
@@ -1693,7 +1697,6 @@ struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
- bool mfp;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1888,6 +1891,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
*
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ *	than the BSS bandwidth for a TDLS link on the base channel.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -1920,6 +1926,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_CHANCTX_STA_CSA,
IEEE80211_HW_SUPPORTS_CLONED_SKBS,
IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+ IEEE80211_HW_TDLS_WIDER_BW,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3703,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
void ieee80211_restart_hw(struct ieee80211_hw *hw);
/**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- * driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
*
- * See also netif_napi_add().
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
*/
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct napi_struct *napi);
/**
* ieee80211_rx - receive frame
@@ -3731,7 +3746,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
*/
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ ieee80211_rx_napi(hw, skb, NULL);
+}
/**
* ieee80211_rx_irqsafe - receive frame
@@ -4315,19 +4333,6 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k);
/**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
- u8 *k1, u8 *k2);
-
-/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
* @keyconf: the parameter passed with the set key
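
With ieee80211_napi_add() gone, a driver owns its NAPI context and hands frames to mac80211 from its poll routine via ieee80211_rx_napi(); NAPI poll runs in softirq context, which satisfies the BHs-disabled requirement. A hedged sketch; the mydrv_* structure and dequeue helper are invented for illustration:

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = mydrv_dequeue_rx(priv)) != NULL) {
		/* softirq context, so BHs are already disabled here */
		ieee80211_rx_napi(priv->hw, skb, napi);
		done++;
	}

	if (done < budget)
		napi_complete(napi);

	return done;
}
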
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a4691..b7f996152 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
/**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
-/**
* ieee802154_rx_irqsafe - receive frame
*
* Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 000000000..4757997f7
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+ u32 label[MAX_NEW_LABELS];
+ u32 labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+ return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b3a775125..aba5695fa 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);
void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr);
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ struct sk_buff *oskb);
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66f4..8b683841e 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@ struct neigh_statistics {
unsigned long forced_gc_runs; /* number of forced GC runs */
unsigned long unres_discards; /* number of unresolved drops */
+ unsigned long table_fulls; /* times even gc couldn't help */
};
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e951453e0..2dcea635e 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -118,6 +118,9 @@ struct net {
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+ struct list_head nfnl_acct_list;
+#endif
#endif
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 000000000..42008f10d
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 000000000..ed6bd66fa
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 4023c4ce2..e8ad46834 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -250,8 +250,12 @@ void nf_ct_untracked_status_or(unsigned long bits);
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp);
@@ -291,7 +295,9 @@ extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index f2f0fa3bb..c03f9c42b 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -52,7 +52,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
int __nf_conntrack_confirm(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 3f3aecbc8..dce56f09a 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -4,7 +4,9 @@
#ifndef _NF_CONNTRACK_EXPECT_H
#define _NF_CONNTRACK_EXPECT_H
+
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
extern unsigned int nf_ct_expect_hsize;
extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@ int nf_conntrack_expect_init(void);
void nf_conntrack_expect_fini(void);
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index dec6336bf..7e2b1d025 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -54,7 +54,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_conntrack_labels_init(void);
void nf_conntrack_labels_fini(void);
+int nf_connlabels_get(struct net *net, unsigned int n_bits);
+void nf_connlabels_put(struct net *net);
#else
static inline int nf_conntrack_labels_init(void) { return 0; }
static inline void nf_conntrack_labels_fini(void) {}
+static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 034efe8d4..4e32512ce 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -1,25 +1,89 @@
#ifndef _NF_CONNTRACK_ZONES_H
#define _NF_CONNTRACK_ZONES_H
-#define NF_CT_DEFAULT_ZONE 0
+#include <linux/netfilter/nf_conntrack_zones_common.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_extend.h>
-struct nf_conntrack_zone {
- u16 id;
-};
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
+{
+ const struct nf_conntrack_zone *nf_ct_zone = NULL;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
+#endif
+ return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+ zone->id = id;
+ zone->flags = flags;
+ zone->dir = dir;
+
+ return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+ struct nf_conntrack_zone *tmp)
+{
+ const struct nf_conntrack_zone *zone;
+
+ if (!tmpl)
+ return &nf_ct_zone_dflt;
+
+ zone = nf_ct_zone(tmpl);
+ if (zone->flags & NF_CT_FLAG_MARK)
+ zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+ return zone;
+}
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+ const struct nf_conntrack_zone *info)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
- if (nf_ct_zone)
- return nf_ct_zone->id;
+
+ nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+ if (!nf_ct_zone)
+ return -ENOMEM;
+
+ nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+ info->flags);
#endif
- return NF_CT_DEFAULT_ZONE;
+ return 0;
}
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_matches_dir(zone, dir) ?
+ zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b,
+ enum ip_conntrack_dir dir)
+{
+ return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+ nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+ const struct nf_conntrack_zone *b)
+{
+ return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
#endif /* _NF_CONNTRACK_ZONES_H */
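
A zone is now a small struct carrying an id, a direction mask and flags, and nf_ct_zone_tmpl() lets a template take the id from skb->mark. A sketch of building a zone on the stack and matching it against a conntrack entry; the NF_CT_ZONE_DIR_* constants are assumed to come from the companion nf_conntrack_zones_common.h:

static bool example_ct_in_zone(const struct nf_conn *ct, u16 id)
{
	struct nf_conntrack_zone tmp;
	const struct nf_conntrack_zone *zone;

	/* zone valid for both directions, no flags */
	zone = nf_ct_zone_init(&tmp, id,
			       NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL, 0);

	return nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL);
}
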
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644
index 000000000..6b84cf649
--- /dev/null
+++ b/include/net/netfilter/nft_dup.h
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544a2..c0368db6d 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
int auto_flowlabels;
int icmpv6_time;
int anycast_src_echo_reply;
+ int ip_nonlocal_bind;
int fwmark_reflect;
int idgen_retries;
int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba64..38aa4983e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
+ struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 01fc8c531..d0d0f1e53 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -79,6 +79,7 @@ struct nci_ops {
int (*close)(struct nci_dev *ndev);
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
+ int (*post_setup)(struct nci_dev *ndev);
int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
int (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@ int nci_request(struct nci_dev *ndev,
unsigned long opt),
unsigned long opt, __u32 timeout);
int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f9e58ae45..30afc9a67 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -203,6 +203,7 @@ struct nfc_dev {
int n_vendor_cmds;
struct nfc_ops *ops;
+ struct genl_info *cur_cmd_info;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -318,4 +319,44 @@ static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
return 0;
}
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+ enum nfc_attrs attr,
+ u32 oui, u32 subcmd,
+ int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @subcmd: vendor-specific subcommand
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+ u32 oui, u32 subcmd, int approxlen)
+{
+ return __nfc_alloc_vendor_cmd_reply_skb(dev,
+ NFC_ATTR_VENDOR_DATA,
+ oui,
+ subcmd, approxlen);
+}
+
#endif /* __NET_NFC_H */
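
Putting the pieces together, a vendor command's doit() is expected to allocate the reply, append its payload and finish with nfc_vendor_cmd_reply(). A rough sketch; the OUI, subcommand and payload are placeholders, and the doit() prototype is assumed to match struct nfc_vendor_cmd:

static int example_vendor_doit(struct nfc_dev *dev, void *data, size_t data_len)
{
	u32 fw_version = 0x010203;	/* placeholder payload */
	struct sk_buff *reply;

	reply = nfc_vendor_cmd_alloc_reply_skb(dev, 0x123456 /* OUI */,
					       0x01 /* subcmd */,
					       sizeof(fw_version));
	if (!reply)
		return -ENOMEM;

	/* anything appended here ends up inside NFC_ATTR_VENDOR_DATA */
	memcpy(skb_put(reply, sizeof(fw_version)), &fw_version,
	       sizeof(fw_version));

	return nfc_vendor_cmd_reply(reply);
}
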
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b0ab530d2..cf2713d8b 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,6 +52,8 @@ enum nl802154_commands {
NL802154_CMD_SET_LBT_MODE,
+ NL802154_CMD_SET_ACKREQ_DEFAULT,
+
/* add new commands above here */
/* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@ enum nl802154_attrs {
NL802154_ATTR_SUPPORTED_COMMANDS,
+ NL802154_ATTR_ACKREQ_DEFAULT,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf12c..401038d2f 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -110,10 +110,8 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
+ struct tcf_result *res, bool compat_mode);
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03af..f46af2568 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -188,8 +188,12 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);
unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+ const struct net_device *dev,
+ __be32 addr);
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -250,6 +254,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
+ if (netif_index_is_vrf(sock_net(sk), oif))
+ flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
+
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
protocol, flow_flags, dst, src, dport, sport);
}
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922d1..18fdb9818 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f87..444faa89a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -340,6 +340,7 @@ extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
struct Qdisc_class_common {
@@ -513,17 +514,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
- const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+ const struct sk_buff *skb)
{
- struct gnet_stats_basic_cpu *bstats =
- this_cpu_ptr(sch->cpu_bstats);
-
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
@@ -547,16 +551,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
sch->qstats.drops += count;
}
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
- sch->qstats.drops++;
+ qstats->drops++;
}
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
- struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+ qstats->overlimits++;
+}
- qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+ qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
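
The split lets per-CPU statistics be updated through thin wrappers over the plain gnet_stats updaters. A sketch of how a lockless qdisc might use them in its enqueue path; the example_queue_* helpers are hypothetical:

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (example_queue_full(sch)) {		/* hypothetical helper */
		qdisc_qstats_cpu_drop(sch);
		kfree_skb(skb);
		return NET_XMIT_CN;
	}

	example_queue_push(sch, skb);		/* hypothetical helper */
	qdisc_bstats_cpu_update(sch, skb);
	return NET_XMIT_SUCCESS;
}
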
diff --git a/include/net/sock.h b/include/net/sock.h
index 4ca4c3fe4..e23717013 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@ struct sock {
void *sk_security;
#endif
__u32 sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
u32 sk_classid;
+#endif
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
@@ -1048,42 +1050,9 @@ struct proto {
#endif
};
-/*
- * Bits in struct cg_proto.flags
- */
-enum cg_proto_flags {
- /* Currently active and new sockets should be assigned to cgroups */
- MEMCG_SOCK_ACTIVE,
- /* It was ever activated; we must disarm static keys on destruction */
- MEMCG_SOCK_ACTIVATED,
-};
-
-struct cg_proto {
- struct page_counter memory_allocated; /* Current allocated memory. */
- struct percpu_counter sockets_allocated; /* Current number of sockets. */
- int memory_pressure;
- long sysctl_mem[3];
- unsigned long flags;
- /*
- * memcg field is used to find which memcg we belong directly
- * Each memcg struct can hold more than one cg_proto, so container_of
- * won't really cut.
- *
- * The elegant solution would be having an inverse function to
- * proto_cgroup in struct proto, but that means polluting the structure
- * for everybody, instead of just for memcg users.
- */
- struct mem_cgroup *memcg;
-};
-
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
-static inline bool memcg_proto_active(struct cg_proto *cg_proto)
-{
- return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-}
-
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
@@ -1693,6 +1662,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
+static inline void sk_set_txhash(struct sock *sk)
+{
+ sk->sk_txhash = prandom_u32();
+
+ if (unlikely(!sk->sk_txhash))
+ sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+ if (sk->sk_txhash)
+ sk_set_txhash(sk);
+}
+
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
@@ -1717,6 +1700,8 @@ static inline void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+ sk_rethink_txhash(sk);
+
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f118..319baab3b 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@ struct switchdev_obj {
struct switchdev_obj_fdb { /* PORT_FDB */
const unsigned char *addr;
u16 vid;
+ u16 ndm_state;
} fdb;
} u;
};
@@ -157,6 +158,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining);
#else
@@ -271,6 +275,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
return -EOPNOTSUPP;
}
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining)
+{
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index a152e9858..958d69cfb 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -15,7 +15,7 @@
struct tcf_bpf {
struct tcf_common common;
- struct bpf_prog *filter;
+ struct bpf_prog __rcu *filter;
union {
u32 bpf_fd;
u16 bpf_num_ops;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b5789..592a6bc02 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
struct tcf_gact {
struct tcf_common common;
#ifdef CONFIG_GACT_PROB
- u16 tcfg_ptype;
- u16 tcfg_pval;
- int tcfg_paction;
+ u16 tcfg_ptype;
+ u16 tcfg_pval;
+ int tcfg_paction;
+ atomic_t packets;
#endif
};
#define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1c1..dae96bae1 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@ struct tcf_mirred {
int tcfm_eaction;
int tcfm_ifindex;
int tcfm_ok_push;
- struct net_device *tcfm_dev;
+ struct net_device __rcu *tcfm_dev;
struct list_head tcfm_list;
};
#define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9b73d7ee5..cfec17eb0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -281,6 +281,8 @@ extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
+extern int sysctl_tcp_pacing_ss_ratio;
+extern int sysctl_tcp_pacing_ca_ratio;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -892,7 +894,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(const char *name);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -995,6 +997,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
#define TCP_INFINITE_SSTHRESH 0x7fffffff
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1071,7 +1078,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
return tp->snd_cwnd < 2 * tp->max_packets_out;
return tp->is_cwnd_limited;
@@ -1166,6 +1173,19 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
}
u32 tcp_default_init_rwnd(u32 mss);
+void tcp_cwnd_restart(struct sock *sk, s32 delta);
+
+static inline void tcp_slow_start_after_idle_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 delta;
+
+ if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+ return;
+ delta = tcp_time_stamp - tp->lsndtime;
+ if (delta > inet_csk(sk)->icsk_rto)
+ tcp_cwnd_restart(sk, delta);
+}
/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
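
tcp_in_slow_start() replaces the open-coded snd_cwnd vs. snd_ssthresh comparison; a congestion-avoidance hook typically branches on it much like tcp_reno does. A sketch along those lines, assuming the tcp_slow_start()/tcp_cong_avoid_ai() helpers keep their current signatures:

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		/* exponential growth, one MSS per ACKed segment */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* additive increase once out of slow start */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
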
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0ecad6..1a47946f9 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
static inline void twsk_destructor(struct sock *sk)
{
- BUG_ON(sk == NULL);
- BUG_ON(sk->sk_prot == NULL);
- BUG_ON(sk->sk_prot->twsk_prot == NULL);
if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
sk->sk_prot->twsk_prot->twsk_destructor(sk);
}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index c491c1221..cb2f89f20 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -31,7 +31,8 @@ struct udp_port_cfg {
__be16 peer_udp_port;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
- use_udp6_rx_checksums:1;
+ use_udp6_rx_checksums:1,
+ ipv6_v6only:1;
};
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
@@ -93,6 +94,10 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
void udp_tunnel_sock_release(struct socket *sock);
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+ __be16 flags, __be64 tunnel_id,
+ int md_size);
+
static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
bool udp_csum)
{
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644
index 000000000..593e6094d
--- /dev/null
+++ b/include/net/vrf.h
@@ -0,0 +1,178 @@
+/*
+ * include/net/vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_H
+#define __LINUX_NET_VRF_H
+
+struct net_vrf_dev {
+ struct rcu_head rcu;
+ int ifindex; /* ifindex of master dev */
+ u32 tb_id; /* table id for VRF */
+};
+
+struct slave {
+ struct list_head list;
+ struct net_device *dev;
+};
+
+struct slave_queue {
+ struct list_head all_slaves;
+};
+
+struct net_vrf {
+ struct slave_queue queue;
+ struct rtable *rth;
+ u32 tb_id;
+};
+
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+/* called with rcu_read_lock() */
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+ struct net_vrf_dev *vrf_ptr;
+ int ifindex = 0;
+
+ if (!dev)
+ return 0;
+
+ if (netif_is_vrf(dev)) {
+ ifindex = dev->ifindex;
+ } else {
+ vrf_ptr = rcu_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ ifindex = vrf_ptr->ifindex;
+ }
+
+ return ifindex;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ int ifindex;
+
+ rcu_read_lock();
+ ifindex = vrf_master_ifindex_rcu(dev);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
+/* called with rcu_read_lock */
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+ u32 tb_id = 0;
+
+ if (dev) {
+ struct net_vrf_dev *vrf_ptr;
+
+ vrf_ptr = rcu_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ tb_id = vrf_ptr->tb_id;
+ }
+ return tb_id;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+ u32 tb_id;
+
+ rcu_read_lock();
+ tb_id = vrf_dev_table_rcu(dev);
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ u32 tb_id = 0;
+
+ if (!ifindex)
+ return 0;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ tb_id = vrf_dev_table_rcu(dev);
+
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+/* called with rtnl */
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+ u32 tb_id = 0;
+
+ if (dev) {
+ struct net_vrf_dev *vrf_ptr;
+
+ vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+ if (vrf_ptr)
+ tb_id = vrf_ptr->tb_id;
+ }
+ return tb_id;
+}
+
+/* caller has already checked netif_is_vrf(dev) */
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+ struct rtable *rth = ERR_PTR(-ENETUNREACH);
+ struct net_vrf *vrf = netdev_priv(dev);
+
+ if (vrf) {
+ rth = vrf->rth;
+ atomic_inc(&rth->dst.__refcnt);
+ }
+ return rth;
+}
+
+#else
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+ return ERR_PTR(-ENETUNREACH);
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_H */
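
As a usage sketch (not part of this header), a caller on the input path can combine the helpers above to pick a FIB table, falling back to the main table when the device has no VRF master; example_fib_table_for_device() is an invented name.

/* Hypothetical helper: choose the FIB table for a device, honouring
 * its VRF master when one is configured (vrf_dev_table() returns 0
 * for devices that are not enslaved to a VRF). */
static u32 example_fib_table_for_device(const struct net_device *dev)
{
	u32 tb_id = vrf_dev_table(dev);

	return tb_id ? tb_id : RT_TABLE_MAIN;
}
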
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d33..480a319b4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
+#include <net/dst_metadata.h>
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+
struct vxlan_metadata {
- __be32 vni;
u32 gbp;
};
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
- struct vxlan_metadata *md);
-
/* per UDP socket information */
struct vxlan_sock {
struct hlist_node hlist;
- vxlan_rcv_t *rcv;
- void *data;
struct work_struct del_work;
struct socket *sock;
struct rcu_head rcu;
@@ -117,6 +116,58 @@ struct vxlan_sock {
u32 flags;
};
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ u32 remote_vni;
+ u32 remote_ifindex;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct vxlan_config {
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ u32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ __u16 port_min;
+ __u16 port_max;
+ __u8 tos;
+ __u8 ttl;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+ struct hlist_node hlist; /* vni hash table */
+ struct list_head next; /* vxlan's per namespace list */
+ struct vxlan_sock *vn_sock; /* listening socket */
+ struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
+ struct vxlan_rdst default_dst; /* default destination */
+ u32 flags; /* VXLAN_F_* in vxlan.h */
+
+ struct timer_list age_timer;
+ spinlock_t hash_lock;
+ unsigned int addrcnt;
+ struct gro_cells gro_cells;
+
+ struct vxlan_config cfg;
+
+ struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
#define VXLAN_F_LEARN 0x01
#define VXLAN_F_PROXY 0x02
#define VXLAN_F_RSC 0x04
@@ -130,6 +181,7 @@ struct vxlan_sock {
#define VXLAN_F_REMCSUM_RX 0x400
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
+#define VXLAN_F_COLLECT_METADATA 0x2000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -137,18 +189,16 @@ struct vxlan_sock {
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
- VXLAN_F_REMCSUM_NOPARTIAL)
-
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags);
+ VXLAN_F_REMCSUM_NOPARTIAL | \
+ VXLAN_F_COLLECT_METADATA)
-void vxlan_sock_release(struct vxlan_sock *vs);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf);
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
- bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+ return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
@@ -191,4 +241,10 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
{
}
#endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+ return vs->sock->sk->sk_family;
+}
+
#endif
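
A hedged sketch of the reworked factory API (the example function and device name are invented; vxlan_dev_create() and VXLAN_F_COLLECT_METADATA come from the hunks above): instead of registering a per-socket receive callback, a user creates a metadata-collecting VXLAN device.

/* Hypothetical caller: create a VXLAN netdev that collects tunnel
 * metadata (VNI/remote supplied externally via dst_metadata). */
static struct net_device *example_create_metadata_vxlan(struct net *net)
{
	struct vxlan_config conf = {
		.dst_port = htons(4789),		/* IANA VXLAN port */
		.flags    = VXLAN_F_COLLECT_METADATA,
	};

	return vxlan_dev_create(net, "vxlan-meta0", NET_NAME_USER, &conf);
}
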
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f0ee97eec..312e3fee9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -285,10 +285,13 @@ struct xfrm_policy_afinfo {
unsigned short family;
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
- struct dst_entry *(*dst_lookup)(struct net *net, int tos,
+ struct dst_entry *(*dst_lookup)(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr);
- int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+ int (*get_saddr)(struct net *net, int oif,
+ xfrm_address_t *saddr,
+ xfrm_address_t *daddr);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 39ed2d2fb..92a7d8591 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,14 +105,16 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- /* compare done u32 at a time */
- IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
struct ib_cm_req_event_param {
struct ib_cm_id *listen_id;
+
+ /* P_Key that was used by the GMP's BTH header */
+ u16 bth_pkey;
+
u8 port;
struct ib_sa_path_rec *primary_path;
@@ -223,6 +225,9 @@ struct ib_cm_apr_event_param {
struct ib_cm_sidr_req_event_param {
struct ib_cm_id *listen_id;
+ __be64 service_id;
+ /* P_Key that was used by the GMP's BTH header */
+ u16 bth_pkey;
u8 port;
u16 pkey;
};
@@ -337,11 +342,6 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL)
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
-struct ib_cm_compare_data {
- u32 data[IB_CM_COMPARE_SIZE];
- u32 mask[IB_CM_COMPARE_SIZE];
-};
-
/**
* ib_cm_listen - Initiates listening on the specified service ID for
* connection and service ID resolution requests.
@@ -354,12 +354,13 @@ struct ib_cm_compare_data {
* range of service IDs. If set to 0, the service ID is matched
* exactly. This parameter is ignored if %service_id is set to
* IB_CM_ASSIGN_SERVICE_ID.
- * @compare_data: This parameter is optional. It specifies data that must
- * appear in the private data of a connection request for the specified
- * listen request.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
- struct ib_cm_compare_data *compare_data);
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
+ __be64 service_mask);
+
+struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ __be64 service_id);
struct ib_cm_req_param {
struct ib_sa_path_rec *primary_path;
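
A sketch of moving to the reworked listen API (illustrative only; it assumes the usual ERR_PTR convention for ib_cm_insert_listen(), which this hunk does not show):

/* Hypothetical ULP code: the compare_data argument is gone, and a
 * shareable listen ID for a service is obtained via
 * ib_cm_insert_listen(). */
static struct ib_cm_id *example_listen(struct ib_device *device,
				       ib_cm_handler handler, __be64 sid)
{
	struct ib_cm_id *id = ib_cm_insert_listen(device, handler, sid);

	if (IS_ERR(id))
		return NULL;	/* report or propagate the error as needed */
	return id;
}
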
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8422d5a5..188df91d5 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -127,6 +127,23 @@
#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
#define IB_DEFAULT_PKEY_FULL 0xFFFF
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
enum {
IB_MGMT_MAD_HDR = 24,
IB_MGMT_MAD_DATA = 232,
@@ -240,6 +257,70 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+struct ib_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 issuer_lid;
+ __be16 toggle_count;
+
+ union {
+ struct {
+ u8 details[54];
+ } raw_data;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __packed ntc_129_131;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* LID where change occurred */
+ u8 reserved2;
+ u8 local_changes; /* low bit - local changes */
+ __be32 new_cap_mask; /* new capability mask */
+ u8 reserved3;
+ u8 change_flags; /* low 3 bits only */
+ } __packed ntc_144;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* lid where sys guid changed */
+ __be16 reserved2;
+ __be64 new_sys_guid;
+ } __packed ntc_145;
+
+ struct {
+ __be16 reserved;
+ __be16 lid;
+ __be16 dr_slid;
+ u8 method;
+ u8 reserved2;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 reserved3;
+ u8 dr_trunc_hop;
+ u8 dr_rtn_path[30];
+ } __packed ntc_256;
+
+ struct {
+ __be16 reserved;
+ __be16 lid1;
+ __be16 lid2;
+ __be32 key;
+ __be32 sl_qp1; /* SL: high 4 bits */
+ __be32 qp2; /* high 8 bits reserved */
+ union ib_gid gid1;
+ union ib_gid gid2;
+ } __packed ntc_257_258;
+
+ } details;
+};
+
/**
* ib_mad_send_buf - MAD data buffer and work request for sends.
* @next: A pointer used to chain together MADs for posting.
@@ -388,7 +469,6 @@ enum {
struct ib_mad_agent {
struct ib_device *device;
struct ib_qp *qp;
- struct ib_mr *mr;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
ib_mad_snoop_handler snoop_handler;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b1f7592e0..709a5331e 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -76,6 +76,8 @@ enum {
IB_OPCODE_UC = 0x20,
IB_OPCODE_RD = 0x40,
IB_OPCODE_UD = 0x60,
+ /* per IBTA 3.1 Table 38, A10.3.2 */
+ IB_OPCODE_CNP = 0x80,
/* operations -- just used to define real constants */
IB_OPCODE_SEND_FIRST = 0x00,
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index 98b9086d7..b439e9884 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -119,10 +119,57 @@ struct ib_port_info {
u8 link_roundtrip_latency[3];
};
+struct ib_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __packed;
+
+struct ib_vl_weight_elem {
+ u8 vl; /* IB: VL is low 4 bits, upper 4 bits reserved */
+ /* OPA: VL is low 5 bits, upper 3 bits reserved */
+ u8 weight;
+};
+
static inline u8
ib_get_smp_direction(struct ib_smp *smp)
{
return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
}
+/*
+ * SM Trap/Notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * M_Key volation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE 0x80
+#define IB_NOTICE_TRAP_DR_TRUNC 0x40
+
+
#endif /* IB_SMI_H */
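
Putting the two additions together (illustrative, not from the patch): the trap numbers above plus struct ib_mad_notice_attr from ib_mad.h are enough to decode a key-violation notice.

/* Hypothetical consumer: report P_Key/Q_Key violation traps (257/258)
 * using the ntc_257_258 layout from struct ib_mad_notice_attr. */
static void example_report_key_violation(const struct ib_mad_notice_attr *ntc)
{
	if (ntc->trap_num != IB_NOTICE_TRAP_BAD_PKEY &&
	    ntc->trap_num != IB_NOTICE_TRAP_BAD_QKEY)
		return;

	pr_info("key violation: lid1 0x%x lid2 0x%x key 0x%x\n",
		be16_to_cpu(ntc->details.ntc_257_258.lid1),
		be16_to_cpu(ntc->details.ntc_257_258.lid2),
		be32_to_cpu(ntc->details.ntc_257_258.key));
}
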
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b0f898e3b..7845fae6f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
+#include <linux/socket.h>
#include <uapi/linux/if_ether.h>
#include <linux/atomic.h>
@@ -64,6 +65,12 @@ union ib_gid {
} global;
};
+extern union ib_gid zgid;
+
+struct ib_gid_attr {
+ struct net_device *ndev;
+};
+
enum rdma_node_type {
/* IB values map to NodeInfo:NodeType. */
RDMA_NODE_IB_CA = 1,
@@ -284,7 +291,7 @@ enum ib_port_cap_flags {
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
IB_PORT_CLIENT_REG_SUP = 1 << 25,
- IB_PORT_IP_BASED_GIDS = 1 << 26
+ IB_PORT_IP_BASED_GIDS = 1 << 26,
};
enum ib_port_width {
@@ -556,20 +563,18 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
*/
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
-enum ib_mr_create_flags {
- IB_MR_SIGNATURE_EN = 1,
-};
/**
- * ib_mr_init_attr - Memory region init attributes passed to routine
- * ib_create_mr.
- * @max_reg_descriptors: max number of registration descriptors that
- * may be used with registration work requests.
- * @flags: MR creation flags bit mask.
+ * enum ib_mr_type - memory region type
+ * @IB_MR_TYPE_MEM_REG: memory region that is used for
+ * normal registration
+ * @IB_MR_TYPE_SIGNATURE: memory region that is used for
+ * signature operations (data-integrity
+ * capable regions)
*/
-struct ib_mr_init_attr {
- int max_reg_descriptors;
- u32 flags;
+enum ib_mr_type {
+ IB_MR_TYPE_MEM_REG,
+ IB_MR_TYPE_SIGNATURE,
};
/**
@@ -1252,9 +1257,11 @@ struct ib_udata {
};
struct ib_pd {
+ u32 local_dma_lkey;
struct ib_device *device;
struct ib_uobject *uobject;
atomic_t usecnt; /* count all resources */
+ struct ib_mr *local_mr;
};
struct ib_xrcd {
@@ -1488,7 +1495,7 @@ struct ib_cache {
rwlock_t lock;
struct ib_event_handler event_handler;
struct ib_pkey_cache **pkey_cache;
- struct ib_gid_cache **gid_cache;
+ struct ib_gid_table **gid_cache;
u8 *lmc_cache;
};
@@ -1550,6 +1557,8 @@ struct ib_device {
spinlock_t client_data_lock;
struct list_head core_list;
+ /* Access to the client_data_list is protected by the client_data_lock
+ * spinlock and the lists_rwsem read-write semaphore */
struct list_head client_data_list;
struct ib_cache cache;
@@ -1572,9 +1581,47 @@ struct ib_device {
struct ib_port_attr *port_attr);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
u8 port_num);
+ /* When calling get_netdev, the HW vendor's driver should return the
+ * net device of device @device at port @port_num or NULL if such
+ * a net device doesn't exist. The vendor driver should call dev_hold
+ * on this net device. The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ struct net_device *(*get_netdev)(struct ib_device *device,
+ u8 port_num);
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
+ /* When calling add_gid, the HW vendor's driver should
+ * add the gid of device @device at gid index @index of
+ * port @port_num to be @gid. Meta-info of that gid (for example,
+ * the network device related to this gid is available
+ * at @attr. @context allows the HW vendor driver to store extra
+ * information together with a GID entry. The HW vendor may allocate
+ * memory to contain this information and store it in @context when a
+ * new GID entry is written to. Params are consistent until the next
+ * call of add_gid or delete_gid. The function should return 0 on
+ * success or error otherwise. The function could be called
+ * concurrently for different ports. This function is only called
+ * when roce_gid_table is used.
+ */
+ int (*add_gid)(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context);
+ /* When calling del_gid, the HW vendor's driver should delete the
+ * gid of device @device at gid index @index of port @port_num.
+ * Upon the deletion of a GID entry, the HW vendor must free any
+ * allocated memory. The caller will clear @context afterwards.
+ * This function is only called when roce_gid_table is used.
+ */
+ int (*del_gid)(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ void **context);
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
int (*modify_device)(struct ib_device *device,
@@ -1668,11 +1715,9 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr);
- int (*destroy_mr)(struct ib_mr *mr);
- struct ib_mr * (*create_mr)(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr);
- struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
- int max_page_list_len);
+ struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
int page_list_len);
void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
@@ -1724,6 +1769,7 @@ struct ib_device {
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
+ void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
struct ib_dma_mapping_ops *dma_ops;
@@ -1761,8 +1807,30 @@ struct ib_device {
struct ib_client {
char *name;
void (*add) (struct ib_device *);
- void (*remove)(struct ib_device *);
-
+ void (*remove)(struct ib_device *, void *client_data);
+
+ /* Returns the net_dev belonging to this ib_client and matching the
+ * given parameters.
+ * @dev: An RDMA device that the net_dev use for communication.
+ * @port: A physical port number on the RDMA device.
+ * @pkey: P_Key that the net_dev uses if applicable.
+ * @gid: A GID that the net_dev uses to communicate.
+ * @addr: An IP address the net_dev is configured with.
+ * @client_data: The device's client data set by ib_set_client_data().
+ *
+ * An ib_client that implements a net_dev on top of RDMA devices
+ * (such as IP over IB) should implement this callback, allowing the
+ * rdma_cm module to find the right net_dev for a given request.
+ *
+ * The caller is responsible for calling dev_put on the returned
+ * netdev. */
+ struct net_device *(*get_net_dev_by_params)(
+ struct ib_device *dev,
+ u8 port,
+ u16 pkey,
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ void *client_data);
struct list_head list;
};
@@ -2071,34 +2139,6 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
}
/**
- * rdma_cap_read_multi_sge - Check if the port of device has the capability
- * RDMA Read Multiple Scatter-Gather Entries.
- * @device: Device to check
- * @port_num: Port number to check
- *
- * iWARP has a restriction that RDMA READ requests may only have a single
- * Scatter/Gather Entry (SGE) in the work request.
- *
- * NOTE: although the linux kernel currently assumes all devices are either
- * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
- * WRITEs, according to Tom Talpey, this is not accurate. There are some
- * devices out there that support more than a single SGE on RDMA READ
- * requests, but do not support the same number of SGEs as they do on
- * RDMA WRITE requests. The linux kernel would need rearchitecting to
- * support these imbalanced READ/WRITE SGEs allowed devices. So, for now,
- * suffice with either the device supports the same READ/WRITE SGEs, or
- * it only gets one READ sge.
- *
- * Return: true for any device that allows more than one SGE in RDMA READ
- * requests.
- */
-static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
- u8 port_num)
-{
- return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
-}
-
-/**
* rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
*
* @device: Device
@@ -2115,6 +2155,26 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
return device->port_immutable[port_num].max_mad_size;
}
+/**
+ * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE GID table mechanism manages the various GIDs for a device.
+ *
+ * NOTE: if allocating the port's GID table has failed, this call will still
+ * return true, but any RoCE GID table API will fail.
+ *
+ * Return: true if the port uses RoCE GID table mechanism in order to manage
+ * its GIDs.
+ */
+static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
+ u8 port_num)
+{
+ return rdma_protocol_roce(device, port_num) &&
+ device->add_gid && device->del_gid;
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
@@ -2135,20 +2195,9 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
-/**
- * ib_alloc_pd - Allocates an unused protection domain.
- * @device: The device on which to allocate the protection domain.
- *
- * A protection domain object provides an association between QPs, shared
- * receive queues, address handles, memory regions, and memory windows.
- */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
-/**
- * ib_dealloc_pd - Deallocates a protection domain.
- * @pd: The protection domain to deallocate.
- */
-int ib_dealloc_pd(struct ib_pd *pd);
+void ib_dealloc_pd(struct ib_pd *pd);
/**
* ib_create_ah - Creates an address handle for the given address vector.
@@ -2760,52 +2809,6 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
}
/**
- * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
- * by an HCA.
- * @pd: The protection domain associated assigned to the registered region.
- * @phys_buf_array: Specifies a list of physical buffers to use in the
- * memory region.
- * @num_phys_buf: Specifies the size of the phys_buf_array.
- * @mr_access_flags: Specifies the memory access rights.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
-
-/**
- * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
- * Conceptually, this call performs the functions deregister memory region
- * followed by register physical memory region. Where possible,
- * resources are reused instead of deallocated and reallocated.
- * @mr: The memory region to modify.
- * @mr_rereg_mask: A bit-mask used to indicate which of the following
- * properties of the memory region are being modified.
- * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
- * the new protection domain to associated with the memory region,
- * otherwise, this parameter is ignored.
- * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- * field specifies a list of physical buffers to use in the new
- * translation, otherwise, this parameter is ignored.
- * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
- * field specifies the size of the phys_buf_array, otherwise, this
- * parameter is ignored.
- * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
- * field specifies the new memory access rights, otherwise, this
- * parameter is ignored.
- * @iova_start: The offset of the region's starting I/O virtual address.
- */
-int ib_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
-
-/**
* ib_query_mr - Retrieves information about a specific memory region.
* @mr: The memory region to retrieve information about.
* @mr_attr: The attributes of the specified memory region.
@@ -2821,33 +2824,9 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
*/
int ib_dereg_mr(struct ib_mr *mr);
-
-/**
- * ib_create_mr - Allocates a memory region that may be used for
- * signature handover operations.
- * @pd: The protection domain associated with the region.
- * @mr_init_attr: memory region init attributes.
- */
-struct ib_mr *ib_create_mr(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr);
-
-/**
- * ib_destroy_mr - Destroys a memory region that was created using
- * ib_create_mr and removes it from HW translation tables.
- * @mr: The memory region to destroy.
- *
- * This function can fail, if the memory region has memory windows bound to it.
- */
-int ib_destroy_mr(struct ib_mr *mr);
-
-/**
- * ib_alloc_fast_reg_mr - Allocates memory region usable with the
- * IB_WR_FAST_REG_MR send work request.
- * @pd: The protection domain associated with the region.
- * @max_page_list_len: requested max physical buffer list length to be
- * used with fast register work requests for this MR.
- */
-struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
/**
* ib_alloc_fast_reg_page_list - Allocates a page list array
@@ -3040,4 +3019,8 @@ static inline int ib_check_mr_access(int flags)
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
+ u16 pkey, const union ib_gid *gid,
+ const struct sockaddr *addr);
+
#endif /* IB_VERBS_H */
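
A condensed sketch of how a client adapts to this series (all example_* names are invented): remove() now receives the client data directly, MR allocation goes through ib_alloc_mr(), and ib_dealloc_pd() no longer returns an error.

/* Hypothetical ib_client built against the reworked interfaces. */
static void example_add_one(struct ib_device *device)
{
	struct ib_pd *pd = ib_alloc_pd(device);
	struct ib_mr *mr;

	if (IS_ERR(pd))
		return;

	/* unified allocation path, replacing ib_alloc_fast_reg_mr() */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
	if (!IS_ERR(mr))
		ib_dereg_mr(mr);

	ib_dealloc_pd(pd);	/* void return in this series */
}

static void example_remove_one(struct ib_device *device, void *client_data)
{
	/* client_data arrives directly; nothing was stored in this sketch */
}

static struct ib_client example_client = {
	.name	= "example",
	.add	= example_add_one,
	.remove	= example_remove_one,
};

The client would still be registered with ib_register_client() as before.
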
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
new file mode 100644
index 000000000..a0fa975cd
--- /dev/null
+++ b/include/rdma/opa_port_info.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_PORT_INFO_H)
+#define OPA_PORT_INFO_H
+
+/* Temporary until HFI driver is updated */
+#ifndef USE_PI_LED_ENABLE
+#define USE_PI_LED_ENABLE 0
+#endif
+
+#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
+#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
+
+#define OPA_PORT_PACKET_FORMAT_NOP 0 /* No change */
+#define OPA_PORT_PACKET_FORMAT_8B 1 /* Format 8B */
+#define OPA_PORT_PACKET_FORMAT_9B 2 /* Format 9B */
+#define OPA_PORT_PACKET_FORMAT_10B 4 /* Format 10B */
+#define OPA_PORT_PACKET_FORMAT_16B 8 /* Format 16B */
+
+#define OPA_PORT_LTP_CRC_MODE_NONE 0 /* No change */
+#define OPA_PORT_LTP_CRC_MODE_14 1 /* 14-bit LTP CRC mode (optional) */
+#define OPA_PORT_LTP_CRC_MODE_16 2 /* 16-bit LTP CRC mode */
+#define OPA_PORT_LTP_CRC_MODE_48 4 /* 48-bit LTP CRC mode (optional) */
+#define OPA_PORT_LTP_CRC_MODE_PER_LANE 8 /* 12/16-bit per lane LTP CRC mode */
+
+/* Link Down / Neighbor Link Down Reason; indicated as follows: */
+#define OPA_LINKDOWN_REASON_NONE 0 /* No specified reason */
+#define OPA_LINKDOWN_REASON_RCV_ERROR_0 1
+#define OPA_LINKDOWN_REASON_BAD_PKT_LEN 2
+#define OPA_LINKDOWN_REASON_PKT_TOO_LONG 3
+#define OPA_LINKDOWN_REASON_PKT_TOO_SHORT 4
+#define OPA_LINKDOWN_REASON_BAD_SLID 5
+#define OPA_LINKDOWN_REASON_BAD_DLID 6
+#define OPA_LINKDOWN_REASON_BAD_L2 7
+#define OPA_LINKDOWN_REASON_BAD_SC 8
+#define OPA_LINKDOWN_REASON_RCV_ERROR_8 9
+#define OPA_LINKDOWN_REASON_BAD_MID_TAIL 10
+#define OPA_LINKDOWN_REASON_RCV_ERROR_10 11
+#define OPA_LINKDOWN_REASON_PREEMPT_ERROR 12
+#define OPA_LINKDOWN_REASON_PREEMPT_VL15 13
+#define OPA_LINKDOWN_REASON_BAD_VL_MARKER 14
+#define OPA_LINKDOWN_REASON_RCV_ERROR_14 15
+#define OPA_LINKDOWN_REASON_RCV_ERROR_15 16
+#define OPA_LINKDOWN_REASON_BAD_HEAD_DIST 17
+#define OPA_LINKDOWN_REASON_BAD_TAIL_DIST 18
+#define OPA_LINKDOWN_REASON_BAD_CTRL_DIST 19
+#define OPA_LINKDOWN_REASON_BAD_CREDIT_ACK 20
+#define OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER 21
+#define OPA_LINKDOWN_REASON_BAD_PREEMPT 22
+#define OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT 23
+#define OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT 24
+#define OPA_LINKDOWN_REASON_RCV_ERROR_24 25
+#define OPA_LINKDOWN_REASON_RCV_ERROR_25 26
+#define OPA_LINKDOWN_REASON_RCV_ERROR_26 27
+#define OPA_LINKDOWN_REASON_RCV_ERROR_27 28
+#define OPA_LINKDOWN_REASON_RCV_ERROR_28 29
+#define OPA_LINKDOWN_REASON_RCV_ERROR_29 30
+#define OPA_LINKDOWN_REASON_RCV_ERROR_30 31
+#define OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN 32
+#define OPA_LINKDOWN_REASON_UNKNOWN 33
+/* 34 -reserved */
+#define OPA_LINKDOWN_REASON_REBOOT 35
+#define OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN 36
+/* 37-38 reserved */
+#define OPA_LINKDOWN_REASON_FM_BOUNCE 39
+#define OPA_LINKDOWN_REASON_SPEED_POLICY 40
+#define OPA_LINKDOWN_REASON_WIDTH_POLICY 41
+/* 42-48 reserved */
+#define OPA_LINKDOWN_REASON_DISCONNECTED 49
+#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50
+#define OPA_LINKDOWN_REASON_NOT_INSTALLED 51
+#define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52
+/* 53 reserved */
+#define OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED 54
+/* 55 reserved */
+#define OPA_LINKDOWN_REASON_POWER_POLICY 56
+#define OPA_LINKDOWN_REASON_LINKSPEED_POLICY 57
+#define OPA_LINKDOWN_REASON_LINKWIDTH_POLICY 58
+/* 59 reserved */
+#define OPA_LINKDOWN_REASON_SWITCH_MGMT 60
+#define OPA_LINKDOWN_REASON_SMA_DISABLED 61
+/* 62 reserved */
+#define OPA_LINKDOWN_REASON_TRANSIENT 63
+/* 64-255 reserved */
+
+/* OPA Link Init reason; indicated as follows: */
+/* 3-7; 11-15 reserved; 8-15 cleared on Polling->LinkUp */
+#define OPA_LINKINIT_REASON_NOP 0
+#define OPA_LINKINIT_REASON_LINKUP (1 << 4)
+#define OPA_LINKINIT_REASON_FLAPPING (2 << 4)
+#define OPA_LINKINIT_REASON_CLEAR (8 << 4)
+#define OPA_LINKINIT_OUTSIDE_POLICY (8 << 4)
+#define OPA_LINKINIT_QUARANTINED (9 << 4)
+#define OPA_LINKINIT_INSUFIC_CAPABILITY (10 << 4)
+
+#define OPA_LINK_SPEED_NOP 0x0000 /* Reserved (1-5 Gbps) */
+#define OPA_LINK_SPEED_12_5G 0x0001 /* 12.5 Gbps */
+#define OPA_LINK_SPEED_25G 0x0002 /* 25.78125? Gbps (EDR) */
+
+#define OPA_LINK_WIDTH_1X 0x0001
+#define OPA_LINK_WIDTH_2X 0x0002
+#define OPA_LINK_WIDTH_3X 0x0004
+#define OPA_LINK_WIDTH_4X 0x0008
+
+#define OPA_CAP_MASK3_IsSnoopSupported (1 << 7)
+#define OPA_CAP_MASK3_IsAsyncSC2VLSupported (1 << 6)
+#define OPA_CAP_MASK3_IsAddrRangeConfigSupported (1 << 5)
+#define OPA_CAP_MASK3_IsPassThroughSupported (1 << 4)
+#define OPA_CAP_MASK3_IsSharedSpaceSupported (1 << 3)
+/* reserved (1 << 2) */
+#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
+#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
+
+/**
+ * new MTU values
+ */
+enum {
+ OPA_MTU_8192 = 6,
+ OPA_MTU_10240 = 7,
+};
+
+enum {
+ OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
+ OPA_PORT_PHYS_CONF_STANDARD = 1,
+ OPA_PORT_PHYS_CONF_FIXED = 2,
+ OPA_PORT_PHYS_CONF_VARIABLE = 3,
+ OPA_PORT_PHYS_CONF_SI_PHOTO = 4
+};
+
+enum port_info_field_masks {
+ /* vl.cap */
+ OPA_PI_MASK_VL_CAP = 0x1F,
+ /* port_states.ledenable_offlinereason */
+ OPA_PI_MASK_OFFLINE_REASON = 0x0F,
+ OPA_PI_MASK_LED_ENABLE = 0x40,
+ /* port_states.unsleepstate_downdefstate */
+ OPA_PI_MASK_UNSLEEP_STATE = 0xF0,
+ OPA_PI_MASK_DOWNDEF_STATE = 0x0F,
+ /* port_states.portphysstate_portstate */
+ OPA_PI_MASK_PORT_PHYSICAL_STATE = 0xF0,
+ OPA_PI_MASK_PORT_STATE = 0x0F,
+ /* port_phys_conf */
+ OPA_PI_MASK_PORT_PHYSICAL_CONF = 0x0F,
+ /* collectivemask_multicastmask */
+ OPA_PI_MASK_COLLECT_MASK = 0x38,
+ OPA_PI_MASK_MULTICAST_MASK = 0x07,
+ /* mkeyprotect_lmc */
+ OPA_PI_MASK_MKEY_PROT_BIT = 0xC0,
+ OPA_PI_MASK_LMC = 0x0F,
+ /* smsl */
+ OPA_PI_MASK_SMSL = 0x1F,
+ /* partenforce_filterraw */
+ /* Filter Raw In/Out bits 1 and 2 were removed */
+ OPA_PI_MASK_LINKINIT_REASON = 0xF0,
+ OPA_PI_MASK_PARTITION_ENFORCE_IN = 0x08,
+ OPA_PI_MASK_PARTITION_ENFORCE_OUT = 0x04,
+ /* operational_vls */
+ OPA_PI_MASK_OPERATIONAL_VL = 0x1F,
+ /* sa_qp */
+ OPA_PI_MASK_SA_QP = 0x00FFFFFF,
+ /* sm_trap_qp */
+ OPA_PI_MASK_SM_TRAP_QP = 0x00FFFFFF,
+ /* localphy_overrun_errors */
+ OPA_PI_MASK_LOCAL_PHY_ERRORS = 0xF0,
+ OPA_PI_MASK_OVERRUN_ERRORS = 0x0F,
+ /* clientrereg_subnettimeout */
+ OPA_PI_MASK_CLIENT_REREGISTER = 0x80,
+ OPA_PI_MASK_SUBNET_TIMEOUT = 0x1F,
+ /* port_link_mode */
+ OPA_PI_MASK_PORT_LINK_SUPPORTED = (0x001F << 10),
+ OPA_PI_MASK_PORT_LINK_ENABLED = (0x001F << 5),
+ OPA_PI_MASK_PORT_LINK_ACTIVE = (0x001F << 0),
+ /* port_link_crc_mode */
+ OPA_PI_MASK_PORT_LINK_CRC_SUPPORTED = 0x0F00,
+ OPA_PI_MASK_PORT_LINK_CRC_ENABLED = 0x00F0,
+ OPA_PI_MASK_PORT_LINK_CRC_ACTIVE = 0x000F,
+ /* port_mode */
+ OPA_PI_MASK_PORT_MODE_SECURITY_CHECK = 0x0001,
+ OPA_PI_MASK_PORT_MODE_16B_TRAP_QUERY = 0x0002,
+ OPA_PI_MASK_PORT_MODE_PKEY_CONVERT = 0x0004,
+ OPA_PI_MASK_PORT_MODE_SC2SC_MAPPING = 0x0008,
+ OPA_PI_MASK_PORT_MODE_VL_MARKER = 0x0010,
+ OPA_PI_MASK_PORT_PASS_THROUGH = 0x0020,
+ OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE = 0x0040,
+ /* flit_control.interleave */
+ OPA_PI_MASK_INTERLEAVE_DIST_SUP = (0x0003 << 12),
+ OPA_PI_MASK_INTERLEAVE_DIST_ENABLE = (0x0003 << 10),
+ OPA_PI_MASK_INTERLEAVE_MAX_NEST_TX = (0x001F << 5),
+ OPA_PI_MASK_INTERLEAVE_MAX_NEST_RX = (0x001F << 0),
+
+ /* port_error_action */
+ OPA_PI_MASK_EX_BUFFER_OVERRUN = 0x80000000,
+ /* 7 bits reserved */
+ OPA_PI_MASK_FM_CFG_ERR_EXCEED_MULTICAST_LIMIT = 0x00800000,
+ OPA_PI_MASK_FM_CFG_BAD_CONTROL_FLIT = 0x00400000,
+ OPA_PI_MASK_FM_CFG_BAD_PREEMPT = 0x00200000,
+ OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER = 0x00100000,
+ OPA_PI_MASK_FM_CFG_BAD_CRDT_ACK = 0x00080000,
+ OPA_PI_MASK_FM_CFG_BAD_CTRL_DIST = 0x00040000,
+ OPA_PI_MASK_FM_CFG_BAD_TAIL_DIST = 0x00020000,
+ OPA_PI_MASK_FM_CFG_BAD_HEAD_DIST = 0x00010000,
+ /* 2 bits reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_VL_MARKER = 0x00002000,
+ OPA_PI_MASK_PORT_RCV_PREEMPT_VL15 = 0x00001000,
+ OPA_PI_MASK_PORT_RCV_PREEMPT_ERROR = 0x00000800,
+ /* 1 bit reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_MidTail = 0x00000200,
+ /* 1 bit reserved */
+ OPA_PI_MASK_PORT_RCV_BAD_SC = 0x00000080,
+ OPA_PI_MASK_PORT_RCV_BAD_L2 = 0x00000040,
+ OPA_PI_MASK_PORT_RCV_BAD_DLID = 0x00000020,
+ OPA_PI_MASK_PORT_RCV_BAD_SLID = 0x00000010,
+ OPA_PI_MASK_PORT_RCV_PKTLEN_TOOSHORT = 0x00000008,
+ OPA_PI_MASK_PORT_RCV_PKTLEN_TOOLONG = 0x00000004,
+ OPA_PI_MASK_PORT_RCV_BAD_PKTLEN = 0x00000002,
+ OPA_PI_MASK_PORT_RCV_BAD_LT = 0x00000001,
+
+ /* pass_through.res_drctl */
+ OPA_PI_MASK_PASS_THROUGH_DR_CONTROL = 0x01,
+
+ /* buffer_units */
+ OPA_PI_MASK_BUF_UNIT_VL15_INIT = (0x00000FFF << 11),
+ OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE = (0x0000001F << 6),
+ OPA_PI_MASK_BUF_UNIT_CREDIT_ACK = (0x00000003 << 3),
+ OPA_PI_MASK_BUF_UNIT_BUF_ALLOC = (0x00000003 << 0),
+
+ /* neigh_mtu.pvlx_to_mtu */
+ OPA_PI_MASK_NEIGH_MTU_PVL0 = 0xF0,
+ OPA_PI_MASK_NEIGH_MTU_PVL1 = 0x0F,
+
+ /* neigh_mtu.vlstall_hoq_life */
+ OPA_PI_MASK_VL_STALL = (0x03 << 5),
+ OPA_PI_MASK_HOQ_LIFE = (0x1F << 0),
+
+ /* port_neigh_mode */
+ OPA_PI_MASK_NEIGH_MGMT_ALLOWED = (0x01 << 3),
+ OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS = (0x01 << 2),
+ OPA_PI_MASK_NEIGH_NODE_TYPE = (0x03 << 0),
+
+ /* resptime_value */
+ OPA_PI_MASK_RESPONSE_TIME_VALUE = 0x1F,
+
+ /* mtucap */
+ OPA_PI_MASK_MTU_CAP = 0x0F,
+};
+
+#if USE_PI_LED_ENABLE
+struct opa_port_states {
+ u8 reserved;
+ u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */
+ u8 reserved2;
+ u8 portphysstate_portstate; /* 4 bits, 4 bits */
+};
+#define PI_LED_ENABLE_SUP 1
+#else
+struct opa_port_states {
+ u8 reserved;
+ u8 offline_reason; /* 2 res, 6 bits */
+ u8 reserved2;
+ u8 portphysstate_portstate; /* 4 bits, 4 bits */
+};
+#define PI_LED_ENABLE_SUP 0
+#endif
+
+struct opa_port_state_info {
+ struct opa_port_states port_states;
+ __be16 link_width_downgrade_tx_active;
+ __be16 link_width_downgrade_rx_active;
+};
+
+struct opa_port_info {
+ __be32 lid;
+ __be32 flow_control_mask;
+
+ struct {
+ u8 res; /* was inittype */
+ u8 cap; /* 3 res, 5 bits */
+ __be16 high_limit;
+ __be16 preempt_limit;
+ u8 arb_high_cap;
+ u8 arb_low_cap;
+ } vl;
+
+ struct opa_port_states port_states;
+ u8 port_phys_conf; /* 4 res, 4 bits */
+ u8 collectivemask_multicastmask; /* 2 res, 3, 3 */
+ u8 mkeyprotect_lmc; /* 2 bits, 2 res, 4 bits */
+ u8 smsl; /* 3 res, 5 bits */
+
+ u8 partenforce_filterraw; /* bit fields */
+ u8 operational_vls; /* 3 res, 5 bits */
+ __be16 pkey_8b;
+ __be16 pkey_10b;
+ __be16 mkey_violations;
+
+ __be16 pkey_violations;
+ __be16 qkey_violations;
+ __be32 sm_trap_qp; /* 8 bits, 24 bits */
+
+ __be32 sa_qp; /* 8 bits, 24 bits */
+ u8 neigh_port_num;
+ u8 link_down_reason;
+ u8 neigh_link_down_reason;
+ u8 clientrereg_subnettimeout; /* 1 bit, 2 bits, 5 */
+
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 active;
+ } link_speed;
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 active;
+ } link_width;
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ __be16 tx_active;
+ __be16 rx_active;
+ } link_width_downgrade;
+ __be16 port_link_mode; /* 1 res, 5 bits, 5 bits, 5 bits */
+ __be16 port_ltp_crc_mode; /* 4 res, 4 bits, 4 bits, 4 bits */
+
+ __be16 port_mode; /* 9 res, bit fields */
+ struct {
+ __be16 supported;
+ __be16 enabled;
+ } port_packet_format;
+ struct {
+ __be16 interleave; /* 2 res, 2,2,5,5 */
+ struct {
+ __be16 min_initial;
+ __be16 min_tail;
+ u8 large_pkt_limit;
+ u8 small_pkt_limit;
+ u8 max_small_pkt_limit;
+ u8 preemption_limit;
+ } preemption;
+ } flit_control;
+
+ __be32 reserved4;
+ __be32 port_error_action; /* bit field */
+
+ struct {
+ u8 egress_port;
+ u8 res_drctl; /* 7 res, 1 */
+ } pass_through;
+ __be16 mkey_lease_period;
+ __be32 buffer_units; /* 9 res, 12, 5, 3, 3 */
+
+ __be32 reserved5;
+ __be32 sm_lid;
+
+ __be64 mkey;
+
+ __be64 subnet_prefix;
+
+ struct {
+ u8 pvlx_to_mtu[OPA_MAX_VLS/2]; /* 4 bits, 4 bits */
+ } neigh_mtu;
+
+ struct {
+ u8 vlstall_hoqlife; /* 3 bits, 5 bits */
+ } xmit_q[OPA_MAX_VLS];
+
+ struct {
+ u8 addr[16];
+ } ipaddr_ipv6;
+
+ struct {
+ u8 addr[4];
+ } ipaddr_ipv4;
+
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+
+ __be64 neigh_node_guid;
+
+ __be32 ib_cap_mask;
+ __be16 reserved9; /* was ib_cap_mask2 */
+ __be16 opa_cap_mask;
+
+ __be32 reserved10; /* was link_roundtrip_latency */
+ __be16 overall_buffer_space;
+ __be16 reserved11; /* was max_credit_hint */
+
+ __be16 diag_code;
+ struct {
+ u8 buffer;
+ u8 wire;
+ } replay_depth;
+ u8 port_neigh_mode;
+ u8 mtucap; /* 4 res, 4 bits */
+
+ u8 resptimevalue; /* 3 res, 5 bits */
+ u8 local_port_num;
+ u8 reserved12;
+ u8 reserved13; /* was guid_cap */
+} __attribute__ ((packed));
+
+#endif /* OPA_PORT_INFO_H */
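
Since most of the fields above are packed sub-byte values, consumers are expected to extract them with the OPA_PI_MASK_* constants; a small sketch (example_* names invented):

/* Hypothetical accessors for the packed port state byte. */
static u8 example_opa_port_state(const struct opa_port_states *ps)
{
	return ps->portphysstate_portstate & OPA_PI_MASK_PORT_STATE;
}

static u8 example_opa_port_phys_state(const struct opa_port_states *ps)
{
	return (ps->portphysstate_portstate &
		OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
}
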
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 29063e84c..4a529ef47 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -40,6 +40,10 @@
#define OPA_SMP_DR_DATA_SIZE 1872
#define OPA_SMP_MAX_PATH_HOPS 64
+#define OPA_MAX_VLS 32
+#define OPA_MAX_SLS 32
+#define OPA_MAX_SCS 32
+
#define OPA_SMI_CLASS_VERSION 0x80
#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
@@ -73,6 +77,49 @@ struct opa_smp {
} __packed;
+/* Subnet management attributes */
+/* ... */
+#define OPA_ATTRIB_ID_NODE_DESCRIPTION cpu_to_be16(0x0010)
+#define OPA_ATTRIB_ID_NODE_INFO cpu_to_be16(0x0011)
+#define OPA_ATTRIB_ID_PORT_INFO cpu_to_be16(0x0015)
+#define OPA_ATTRIB_ID_PARTITION_TABLE cpu_to_be16(0x0016)
+#define OPA_ATTRIB_ID_SL_TO_SC_MAP cpu_to_be16(0x0017)
+#define OPA_ATTRIB_ID_VL_ARBITRATION cpu_to_be16(0x0018)
+#define OPA_ATTRIB_ID_SM_INFO cpu_to_be16(0x0020)
+#define OPA_ATTRIB_ID_CABLE_INFO cpu_to_be16(0x0032)
+#define OPA_ATTRIB_ID_AGGREGATE cpu_to_be16(0x0080)
+#define OPA_ATTRIB_ID_SC_TO_SL_MAP cpu_to_be16(0x0082)
+#define OPA_ATTRIB_ID_SC_TO_VLR_MAP cpu_to_be16(0x0083)
+#define OPA_ATTRIB_ID_SC_TO_VLT_MAP cpu_to_be16(0x0084)
+#define OPA_ATTRIB_ID_SC_TO_VLNT_MAP cpu_to_be16(0x0085)
+/* ... */
+#define OPA_ATTRIB_ID_PORT_STATE_INFO cpu_to_be16(0x0087)
+/* ... */
+#define OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE cpu_to_be16(0x008A)
+/* ... */
+
+struct opa_node_description {
+ u8 data[64];
+} __attribute__ ((packed));
+
+struct opa_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be32 reserved;
+ __be64 system_image_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3]; /* network byte order */
+} __attribute__ ((packed));
+
+#define OPA_PARTITION_TABLE_BLK_SIZE 32
+
static inline u8
opa_get_smp_direction(struct opa_smp *smp)
{
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 0790882e0..585266144 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -77,4 +77,11 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int group, gfp_t flags);
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 if the group has listeners, or a negative value if it has none.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
#endif /* _RDMA_NETLINK_H */
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 676b03b78..11571b2a8 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -61,4 +61,9 @@ static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr);
+extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
+extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+ int desc_type);
+
#endif /* _SCSI_COMMON_H_ */
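
With the sense helpers now exported from scsi_common.h, a target or LLD can build descriptor-format sense data in one place; a hedged sketch (the key/asc/ascq values are only an example):

/* Hypothetical helper: descriptor-format sense with an INFORMATION
 * descriptor carrying the failing LBA. */
static int example_fill_sense(u8 *buf, int buf_len, u64 bad_lba)
{
	scsi_build_sense_buffer(1 /* desc format */, buf,
				ILLEGAL_REQUEST, 0x10, 0x01);
	return scsi_set_sense_information(buf, buf_len, bad_lba);
}
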
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index ae84b2214..fe89d7cd6 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -57,9 +57,10 @@ enum scsi_device_event {
SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */
SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
+ SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */
SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
- SDEV_EVT_LAST = SDEV_EVT_LUN_CHANGE_REPORTED,
+ SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
};
@@ -195,34 +196,13 @@ struct scsi_device {
struct execute_work ew; /* used to get process context on put */
struct work_struct requeue_work;
- struct scsi_dh_data *scsi_dh_data;
+ struct scsi_device_handler *handler;
+ void *handler_data;
+
enum scsi_device_state sdev_state;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
-typedef void (*activate_complete)(void *, int);
-struct scsi_device_handler {
- /* Used by the infrastructure */
- struct list_head list; /* list of scsi_device_handlers */
-
- /* Filled by the hardware handler */
- struct module *module;
- const char *name;
- int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
- struct scsi_dh_data *(*attach)(struct scsi_device *);
- void (*detach)(struct scsi_device *);
- int (*activate)(struct scsi_device *, activate_complete, void *);
- int (*prep_fn)(struct scsi_device *, struct request *);
- int (*set_params)(struct scsi_device *, const char *);
- bool (*match)(struct scsi_device *);
-};
-
-struct scsi_dh_data {
- struct scsi_device_handler *scsi_dh;
- struct scsi_device *sdev;
- struct kref kref;
-};
-
#define to_scsi_device(d) \
container_of(d, struct scsi_device, sdev_gendev)
#define class_to_sdev(d) \
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 620c723ee..85d731746 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -55,11 +55,26 @@ enum {
SCSI_DH_NOSYS,
SCSI_DH_DRIVER_MAX,
};
-#if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE)
+
+typedef void (*activate_complete)(void *, int);
+struct scsi_device_handler {
+ /* Used by the infrastructure */
+ struct list_head list; /* list of scsi_device_handlers */
+
+ /* Filled by the hardware handler */
+ struct module *module;
+ const char *name;
+ int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+ int (*attach)(struct scsi_device *);
+ void (*detach)(struct scsi_device *);
+ int (*activate)(struct scsi_device *, activate_complete, void *);
+ int (*prep_fn)(struct scsi_device *, struct request *);
+ int (*set_params)(struct scsi_device *, const char *);
+};
+
+#ifdef CONFIG_SCSI_DH
extern int scsi_dh_activate(struct request_queue *, activate_complete, void *);
-extern int scsi_dh_handler_exist(const char *);
extern int scsi_dh_attach(struct request_queue *, const char *);
-extern void scsi_dh_detach(struct request_queue *);
extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t);
extern int scsi_dh_set_params(struct request_queue *, const char *);
#else
@@ -69,18 +84,10 @@ static inline int scsi_dh_activate(struct request_queue *req,
fn(data, 0);
return 0;
}
-static inline int scsi_dh_handler_exist(const char *name)
-{
- return 0;
-}
static inline int scsi_dh_attach(struct request_queue *req, const char *name)
{
return SCSI_DH_NOSYS;
}
-static inline void scsi_dh_detach(struct request_queue *q)
-{
- return;
-}
static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
gfp_t gfp)
{
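
With struct scsi_device_handler now public here, a hardware handler reduces to filling this ops table; a skeleton sketch (example_* names invented, and registration presumably still goes through scsi_register_device_handler(), which is not part of this hunk):

/* Hypothetical skeleton handler: attach() now returns an int and keeps
 * its per-device state in sdev->handler_data. */
static int example_dh_attach(struct scsi_device *sdev)
{
	sdev->handler_data = NULL;	/* allocate private state here */
	return 0;			/* SCSI_DH_OK */
}

static void example_dh_detach(struct scsi_device *sdev)
{
	/* free whatever attach() allocated */
	sdev->handler_data = NULL;
}

static struct scsi_device_handler example_dh = {
	.name	= "example_dh",
	.module	= THIS_MODULE,
	.attach	= example_dh_attach,
	.detach	= example_dh_detach,
};
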
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 8d1d7fa67..dbb8c640e 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -4,6 +4,7 @@
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_common.h>
struct scsi_device;
struct Scsi_Host;
@@ -21,14 +22,9 @@ static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
-extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
- int desc_type);
-
extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
-extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 2555ee534..6183d20a0 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -241,6 +241,7 @@ struct iscsi_cls_session {
/* recovery fields */
int recovery_tmo;
+ bool recovery_tmo_sysfs_override;
struct delayed_work recovery_work;
unsigned int target_id;
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index b019e3465..961b821b6 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -22,6 +22,7 @@
#define TEGRA114 0x35
#define TEGRA124 0x40
#define TEGRA132 0x13
+#define TEGRA210 0x21
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
@@ -47,10 +48,11 @@ struct tegra_sku_info {
int cpu_speedo_id;
int cpu_speedo_value;
int cpu_iddq_value;
- int core_process_id;
+ int soc_process_id;
int soc_speedo_id;
- int gpu_speedo_id;
+ int soc_speedo_value;
int gpu_process_id;
+ int gpu_speedo_id;
int gpu_speedo_value;
enum tegra_revision revision;
};
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index bf2058690..44202ff89 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
unsigned int reg;
};
-struct tegra_smmu_ops {
- void (*flush_dcache)(struct page *page, unsigned long offset,
- size_t size);
-};
-
struct tegra_smmu_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -68,8 +63,6 @@ struct tegra_smmu_soc {
unsigned int num_tlb_lines;
unsigned int num_asids;
-
- const struct tegra_smmu_ops *ops;
};
struct tegra_mc;
@@ -103,6 +96,8 @@ struct tegra_mc_soc {
unsigned int num_address_bits;
unsigned int atom_size;
+ u8 client_id_mask;
+
const struct tegra_smmu_soc *smmu;
};
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index f5c0de43a..d18efe402 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -67,6 +67,11 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
#define TEGRA_POWERGATE_XUSBC 22
#define TEGRA_POWERGATE_VIC 23
#define TEGRA_POWERGATE_IRAM 24
+#define TEGRA_POWERGATE_NVDEC 25
+#define TEGRA_POWERGATE_NVJPG 26
+#define TEGRA_POWERGATE_AUD 27
+#define TEGRA_POWERGATE_DFD 28
+#define TEGRA_POWERGATE_VE2 29
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b49..74bc85473 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@ static inline int snd_ac97_update_power(struct snd_ac97 *ac97, int reg,
void snd_ac97_suspend(struct snd_ac97 *ac97);
void snd_ac97_resume(struct snd_ac97 *ac97);
#endif
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+ unsigned int id_mask);
/* quirk types */
enum {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index ff9914083..930b41e5a 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,12 +4,15 @@
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
+#include <drm/i915_component.h>
+
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
int snd_hdac_get_display_clk(struct hdac_bus *bus);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
#else
static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
@@ -31,6 +34,10 @@ static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
{
return 0;
}
+static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
+{
+ return -ENODEV;
+}
#endif
#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e523..2ae8812d7 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_SPB_BASE 0x08
/* Interval used to calculate the iterating register offset */
#define AZX_SPB_INTERVAL 0x08
+/* SPIB base */
+#define AZX_SPB_SPIB 0x00
+/* SPIB MAXFIFO base */
+#define AZX_SPB_MAXFIFO 0x04
/* registers of Global Time Synchronization Capability Structure */
#define AZX_GTS_CAP_ID 0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fde8..49bc836fc 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@ int snd_hdac_device_register(struct hdac_device *codec);
void snd_hdac_device_unregister(struct hdac_device *codec);
int snd_hdac_refresh_widgets(struct hdac_device *codec);
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@ static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
}
#ifdef CONFIG_PM
-void snd_hdac_power_up(struct hdac_device *codec);
-void snd_hdac_power_down(struct hdac_device *codec);
-void snd_hdac_power_up_pm(struct hdac_device *codec);
-void snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_power_up(struct hdac_device *codec);
+int snd_hdac_power_down(struct hdac_device *codec);
+int snd_hdac_power_up_pm(struct hdac_device *codec);
+int snd_hdac_power_down_pm(struct hdac_device *codec);
#else
-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
#endif
/*
@@ -437,6 +438,8 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+ int dir, int stream_tag);
int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
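
Since snd_hdac_power_up()/_down() now return an int, callers can propagate runtime-PM failures; a hedged sketch (example_* name invented, a negative return assumed to indicate failure):

/* Hypothetical caller checking the new return value. */
static int example_read_vendor_id(struct hdac_device *codec, hda_nid_t nid)
{
	int ret = snd_hdac_power_up(codec);

	if (ret < 0)
		return ret;

	ret = snd_hdac_read_parm(codec, nid, AC_PAR_VENDOR_ID);
	snd_hdac_power_down(codec);
	return ret;
}
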
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df151..94210dcdb 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@ int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define ebus_to_hbus(ebus) (&(ebus)->bus)
#define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@ enum hdac_ext_stream_type {
* @hstream: hdac_stream
* @pphc_addr: processing pipe host stream pointer
* @pplc_addr: processing pipe link stream pointer
+ * @spib_addr: software position in buffer (SPIB) register pointer
+ * @fifo_addr: SPIB max FIFO register pointer
* @decoupled: stream host and link is decoupled
* @link_locked: link is locked
* @link_prepared: link is prepared
@@ -73,6 +76,9 @@ struct hdac_ext_stream {
void __iomem *pphc_addr;
void __iomem *pplc_addr;
+ void __iomem *spib_addr;
+ void __iomem *fifo_addr;
+
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
@@ -99,6 +105,11 @@ void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+ struct hdac_ext_stream *stream, u32 value);
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+ struct hdac_ext_stream *stream);
+
void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@ struct hdac_ext_link {
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
int stream);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
writew(((readw(addr + reg) & ~(mask)) | (val)), \
addr + reg)
+
+struct hdac_ext_device;
+
+/* ops common to all codec drivers */
+struct hdac_ext_codec_ops {
+ int (*build_controls)(struct hdac_ext_device *dev);
+ int (*init)(struct hdac_ext_device *dev);
+ void (*free)(struct hdac_ext_device *dev);
+};
+
+struct hda_dai_map {
+ char *dai_name;
+ hda_nid_t nid;
+ u32 maxbps;
+};
+
+#define HDA_MAX_NIDS 16
+
+/**
+ * struct hdac_ext_device - HDAC Ext device
+ *
+ * @hdac: hdac core device
+ * @ebus: the hdac extended bus the device is attached to
+ * @nid_list: the dai map which matches the dai-name with the nid
+ * @map_cur_idx: the idx in use in dai_map
+ * @ops: the hda codec ops common to all codec drivers
+ * @private_data: private data; for ASoC this holds the ASoC codec object
+ */
+struct hdac_ext_device {
+ struct hdac_device hdac;
+ struct hdac_ext_bus *ebus;
+
+ /* soc-dai to nid map */
+ struct hda_dai_map nid_list[HDA_MAX_NIDS];
+ unsigned int map_cur_idx;
+
+ /* codec ops */
+ struct hdac_ext_codec_ops ops;
+
+ void *private_data;
+};
+
+#define to_ehdac_device(dev) (container_of((dev), \
+ struct hdac_ext_device, hdac))
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_ext_driver {
+ struct hdac_driver hdac;
+
+ int (*probe)(struct hdac_ext_device *dev);
+ int (*remove)(struct hdac_ext_device *dev);
+ void (*shutdown)(struct hdac_ext_device *dev);
+};
+
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
+
+#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+
#endif /* __SOUND_HDAUDIO_EXT_H */
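As an illustrative sketch (hypothetical driver name and empty callbacks, not from this patch), a codec driver built on the new hdac_ext_driver registration API might look like:

#include <linux/module.h>
#include <sound/hdaudio_ext.h>

static int example_probe(struct hdac_ext_device *edev)
{
	/* edev->hdac is the core hdac_device; driver state can go in private_data */
	edev->private_data = NULL;
	return 0;
}

static int example_remove(struct hdac_ext_device *edev)
{
	return 0;
}

static struct hdac_ext_driver example_driver = {
	.hdac = {
		.driver = {
			.name = "example-hdac-codec",
		},
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return snd_hda_ext_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	snd_hda_ext_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");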
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c17..bb7b2ebfe 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@ struct rsnd_src_platform_info {
/*
* flags
*/
+struct rsnd_ctu_platform_info {
+ u32 flags;
+};
+
+struct rsnd_mix_platform_info {
+ u32 flags;
+};
+
struct rsnd_dvc_platform_info {
u32 flags;
};
@@ -68,6 +76,8 @@ struct rsnd_dvc_platform_info {
struct rsnd_dai_path_info {
struct rsnd_ssi_platform_info *ssi;
struct rsnd_src_platform_info *src;
+ struct rsnd_ctu_platform_info *ctu;
+ struct rsnd_mix_platform_info *mix;
struct rsnd_dvc_platform_info *dvc;
};
@@ -93,6 +103,10 @@ struct rcar_snd_info {
int ssi_info_nr;
struct rsnd_src_platform_info *src_info;
int src_info_nr;
+ struct rsnd_ctu_platform_info *ctu_info;
+ int ctu_info_nr;
+ struct rsnd_mix_platform_info *mix_info;
+ int mix_info_nr;
struct rsnd_dvc_platform_info *dvc_info;
int dvc_info_nr;
struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 000000000..7fffeaa84
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt298.h -- Platform data for RT298
+ *
+ * Copyright 2013 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT298_H
+#define __LINUX_SND_RT298_H
+
+struct rt298_platform_data {
+ bool cbj_en; /* combo jack enable */
+ bool gpio2_en; /* GPIO2 enable */
+ bool suspend_power_off; /* power is off during suspend */
+};
+
+#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a898..5abba037d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
/* dapm events */
void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@ struct snd_soc_dapm_route {
struct snd_soc_dapm_path {
const char *name;
- /* source (input) and sink (output) widgets */
- struct snd_soc_dapm_widget *source;
- struct snd_soc_dapm_widget *sink;
+ /*
+ * source (input) and sink (output) widgets
+ * The union is for convenience, since it is a lot nicer to type
+ * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
+ */
+ union {
+ struct {
+ struct snd_soc_dapm_widget *source;
+ struct snd_soc_dapm_widget *sink;
+ };
+ struct snd_soc_dapm_widget *node[2];
+ };
/* status */
u32 connect:1; /* source and sink widgets are connected */
@@ -524,8 +534,7 @@ struct snd_soc_dapm_path {
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
- struct list_head list_source;
- struct list_head list_sink;
+ struct list_head list_node[2];
struct list_head list_kcontrol;
struct list_head list;
};
@@ -559,8 +568,7 @@ struct snd_soc_dapm_widget {
unsigned char new_power:1; /* power from this run */
unsigned char power_checked:1; /* power checked this run */
unsigned char is_supply:1; /* Widget is a supply type widget */
- unsigned char is_sink:1; /* Widget is a sink type widget */
- unsigned char is_source:1; /* Widget is a source type widget */
+ unsigned char is_ep:2; /* Widget is an endpoint type widget */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@ struct snd_soc_dapm_widget {
struct snd_kcontrol **kcontrols;
struct snd_soc_dobj dobj;
- /* widget input and outputs */
- struct list_head sources;
- struct list_head sinks;
+ /* widget input and output edges */
+ struct list_head edges[2];
/* used during DAPM updates */
struct list_head work_list;
struct list_head power_list;
struct list_head dirty;
- int inputs;
- int outputs;
+ int endpoints[2];
struct clk *clk;
};
@@ -672,4 +678,58 @@ static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
return dapm->bias_level;
}
+enum snd_soc_dapm_direction {
+ SND_SOC_DAPM_DIR_IN,
+ SND_SOC_DAPM_DIR_OUT
+};
+
+#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
+
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+
+/**
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
+ * specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ * incoming or the outgoing widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
+ list_for_each_entry(p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
+ * specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ * incoming or the outgoing widget
+ * @p: The path iterator variable
+ * @next_p: Temporary storage for the next path
+ *
+ * This macro works like snd_soc_dapm_widget_for_each_path, except that
+ * it is safe to remove the current path from the list while iterating
+ */
+#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
+ list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
+ * widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
+ snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
+
+/**
+ * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
+ * a widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_source_path(w, p) \
+ snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
+
#endif
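For illustration, a hypothetical helper (not from this patch) using the new iterators; it counts the connected paths leaving a widget, i.e. paths where the widget is the source and p->sink is the far end:

#include <sound/soc-dapm.h>

static int example_count_connected_sinks(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	int n = 0;

	/* walks w->edges[SND_SOC_DAPM_DIR_IN] via list_node[SND_SOC_DAPM_DIR_IN] */
	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect)
			n++;
	}

	return n;
}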
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41df..086cd7ff6 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@ struct snd_soc_tplg_kcontrol_ops {
struct snd_ctl_elem_info *uinfo);
};
+/* Bytes ext operations, for TLV byte controls */
+struct snd_soc_tplg_bytes_ext_ops {
+ u32 id;
+ int (*get)(unsigned int __user *bytes, unsigned int size);
+ int (*put)(const unsigned int __user *bytes, unsigned int size);
+};
+
/*
* DAPM widget event handlers - used to map handlers onto widgets.
*/
@@ -136,9 +143,13 @@ struct snd_soc_tplg_ops {
int (*manifest)(struct snd_soc_component *,
struct snd_soc_tplg_manifest *);
- /* bespoke kcontrol handlers available for binding */
+ /* vendor specific kcontrol handlers available for binding */
const struct snd_soc_tplg_kcontrol_ops *io_ops;
int io_ops_count;
+
+ /* vendor specific bytes ext handlers available for binding */
+ const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+ int bytes_ext_ops_count;
};
#ifdef CONFIG_SND_SOC_TOPOLOGY
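A sketch of how a component driver might bind the new TLV byte-control handlers; the control ID and the handler bodies are placeholders, not real vendor code:

#include <linux/kernel.h>
#include <sound/soc-topology.h>

static int example_bytes_get(unsigned int __user *bytes, unsigned int size)
{
	/* copy up to @size bytes of vendor data to user space here */
	return 0;
}

static int example_bytes_put(const unsigned int __user *bytes, unsigned int size)
{
	/* validate and apply @size bytes of vendor data here */
	return 0;
}

static const struct snd_soc_tplg_bytes_ext_ops example_bytes_ops[] = {
	{
		.id  = 0x1000,	/* must match the ops id used in the topology file */
		.get = example_bytes_get,
		.put = example_bytes_put,
	},
};

static struct snd_soc_tplg_ops example_tplg_ops = {
	.bytes_ext_ops		= example_bytes_ops,
	.bytes_ext_ops_count	= ARRAY_SIZE(example_bytes_ops),
};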
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf9d..26ede1459 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -86,7 +86,7 @@
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
SNDRV_CTL_ELEM_ACCESS_READWRITE, \
.tlv.p = (tlv_array),\
- .info = snd_soc_info_volsw, \
+ .info = snd_soc_info_volsw_sx, \
.get = snd_soc_get_volsw_sx,\
.put = snd_soc_put_volsw_sx, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -156,7 +156,7 @@
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
SNDRV_CTL_ELEM_ACCESS_READWRITE, \
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw, \
+ .info = snd_soc_info_volsw_sx, \
.get = snd_soc_get_volsw_sx, \
.put = snd_soc_put_volsw_sx, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -526,7 +526,8 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
#ifdef CONFIG_SND_SOC_AC97_BUS
struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+ unsigned int id, unsigned int id_mask);
void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -573,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
+int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info
int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
@@ -619,6 +622,7 @@ int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
* @pin: name of the pin to update
* @mask: bits to check for in reported jack status
* @invert: if non-zero then pin is enabled when status is not reported
+ * @list: internal list entry
*/
struct snd_soc_jack_pin {
struct list_head list;
@@ -635,7 +639,7 @@ struct snd_soc_jack_pin {
* @jack_type: type of jack that is expected for this voltage
* @debounce_time: debounce_time for jack, codec driver should wait for this
* duration before reading the adc for voltages
- * @:list: list container
+ * @list: internal list entry
*/
struct snd_soc_jack_zone {
unsigned int min_mv;
@@ -651,12 +655,12 @@ struct snd_soc_jack_zone {
* @gpio: legacy gpio number
* @idx: gpio descriptor index within the function of the GPIO
* consumer device
- * @gpiod_dev GPIO consumer device
+ * @gpiod_dev: GPIO consumer device
* @name: gpio name. Also as connection ID for the GPIO consumer
* device function name lookup
* @report: value to report when jack detected
* @invert: report presence in low state
- * @debouce_time: debouce time in ms
+ * @debounce_time: debounce time in ms
* @wake: enable as wake source
* @jack_status_check: callback function which overrides the detection
* to provide more complex checks (eg, reading an
@@ -672,11 +676,13 @@ struct snd_soc_jack_gpio {
int debounce_time;
bool wake;
+ /* private: */
struct snd_soc_jack *jack;
struct delayed_work work;
struct gpio_desc *desc;
void *data;
+ /* public: */
int (*jack_status_check)(void *data);
};
@@ -758,7 +764,6 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
- unsigned int probed:1;
struct list_head list;
@@ -792,7 +797,6 @@ struct snd_soc_component {
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
- struct snd_soc_dapm_context *dapm_ptr;
const struct snd_kcontrol_new *controls;
unsigned int num_controls;
@@ -832,9 +836,6 @@ struct snd_soc_codec {
/* component */
struct snd_soc_component component;
- /* Don't access this directly, use snd_soc_codec_get_dapm() */
- struct snd_soc_dapm_context dapm;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_reg;
#endif
@@ -1277,7 +1278,7 @@ static inline struct snd_soc_component *snd_soc_dapm_to_component(
static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
struct snd_soc_dapm_context *dapm)
{
- return container_of(dapm, struct snd_soc_codec, dapm);
+ return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
}
/**
@@ -1302,7 +1303,7 @@ static inline struct snd_soc_platform *snd_soc_dapm_to_platform(
static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
struct snd_soc_component *component)
{
- return component->dapm_ptr;
+ return &component->dapm;
}
/**
@@ -1314,12 +1315,12 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
struct snd_soc_codec *codec)
{
- return &codec->dapm;
+ return snd_soc_component_get_dapm(&codec->component);
}
/**
* snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @dapm: The CODEC for which to initialize the DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
* @level: The DAPM level to initialize to
*
* Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1605,10 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *slots,
unsigned int *slot_width);
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname);
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
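A sketch of the updated AC'97 allocation call from a codec probe; the ID and mask values below are placeholders for whatever the real codec reports:

#include <linux/err.h>
#include <sound/soc.h>

static int example_codec_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;

	/* id/id_mask let the core verify the codec that answers on the bus */
	ac97 = snd_soc_new_ac97_codec(codec, 0x41445300, 0xffffff00);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	snd_soc_codec_set_drvdata(codec, ac97);
	return 0;
}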
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 898be3a8d..6d8f8fba3 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
#define WM8904_MIC_REGS 2
#define WM8904_GPIO_REGS 4
#define WM8904_DRC_REGS 4
-#define WM8904_EQ_REGS 25
+#define WM8904_EQ_REGS 24
/**
* DRC configurations are specified with a label and a set of register
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 7e7f8875a..373d33420 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -62,6 +62,8 @@
/* T10 protection information disabled by default */
#define TA_DEFAULT_T10_PI 0
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
+/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
+#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -517,7 +519,6 @@ struct iscsi_conn {
u16 cid;
/* Remote TCP Port */
u16 login_port;
- u16 local_port;
int net_size;
int login_family;
u32 auth_id;
@@ -527,9 +528,8 @@ struct iscsi_conn {
u32 exp_statsn;
/* Per connection status sequence number */
u32 stat_sn;
-#define IPV6_ADDRESS_SPACE 48
- unsigned char login_ip[IPV6_ADDRESS_SPACE];
- unsigned char local_ip[IPV6_ADDRESS_SPACE];
+ struct sockaddr_storage login_sockaddr;
+ struct sockaddr_storage local_sockaddr;
int conn_usage_count;
int conn_waiting_on_uc;
atomic_t check_immediate_queue;
@@ -636,7 +636,7 @@ struct iscsi_session {
/* session wide counter: expected command sequence number */
u32 exp_cmd_sn;
/* session wide counter: maximum allowed command sequence number */
- u32 max_cmd_sn;
+ atomic_t max_cmd_sn;
struct list_head sess_ooo_cmdsn_list;
/* LIO specific session ID */
@@ -764,6 +764,7 @@ struct iscsi_tpg_attrib {
u32 default_erl;
u8 t10_pi;
u32 fabric_prot_type;
+ u32 tpg_enabled_sendtargets;
struct iscsi_portal_group *tpg;
};
@@ -776,11 +777,10 @@ struct iscsi_np {
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
- u16 np_port;
spinlock_t np_thread_lock;
struct completion np_restart_comp;
struct socket *np_socket;
- struct __kernel_sockaddr_storage np_sockaddr;
+ struct sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
struct timer_list np_login_timer;
void *np_context;
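With sockaddr_storage replacing the fixed-size address strings, both address families can be handled and printed uniformly; a hypothetical debug helper (not from this patch):

#include <linux/printk.h>
#include <target/iscsi/iscsi_target_core.h>

static void example_dump_conn_addrs(struct iscsi_conn *conn)
{
	/* %pISpc prints address and port for either IPv4 or IPv6 */
	pr_debug("login from %pISpc to %pISpc\n",
		 &conn->login_sockaddr, &conn->local_sockaddr);
}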
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 3ff76b4fa..e615bb485 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -50,7 +50,7 @@ struct iscsi_login_stats {
u64 last_fail_time; /* time stamp (jiffies) */
u32 last_fail_type;
int last_intr_fail_ip_family;
- unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ struct sockaddr_storage last_intr_fail_sockaddr;
char last_intr_fail_name[224];
} ____cacheline_aligned;
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e6bb166f1..90e37faa2 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -9,7 +9,7 @@ struct iscsit_transport {
int priv_size;
struct module *owner;
struct list_head t_node;
- int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+ int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
void (*iscsit_wait_conn)(struct iscsi_conn *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1e5c8f949..56cf8e485 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -93,4 +93,6 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
+bool target_sense_desc_format(struct se_device *dev);
+
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 17ae2d6a4..5f48754dc 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,6 +6,7 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/percpu_ida.h>
+#include <linux/t10-pi.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -426,12 +427,6 @@ enum target_core_dif_check {
TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
};
-struct se_dif_v1_tuple {
- __be16 guard_tag;
- __be16 app_tag;
- __be32 ref_tag;
-};
-
/* for sam_task_attr */
#define TCM_SIMPLE_TAG 0x20
#define TCM_HEAD_TAG 0x21
@@ -444,6 +439,9 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
+ unsigned cmd_wait_set:1;
+ unsigned unknown_data_length:1;
+ bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
@@ -455,11 +453,8 @@ struct se_cmd {
unsigned int map_tag;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- unsigned cmd_wait_set:1;
- unsigned unknown_data_length:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
- u32 se_ordered_id;
/* Total size in bytes associated with command */
u32 data_length;
u32 residual_count;
@@ -477,7 +472,6 @@ struct se_cmd {
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
- struct kref cmd_kref;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
@@ -497,6 +491,7 @@ struct se_cmd {
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
spinlock_t t_state_lock;
+ struct kref cmd_kref;
struct completion t_transport_stop_comp;
struct work_struct work;
@@ -509,8 +504,10 @@ struct se_cmd {
struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents;
+ /* Used for lun->lun_ref counting */
+ int lun_ref_active;
+
struct list_head state_list;
- bool state_active;
/* old task stop completion, consider merging with some of the above */
struct completion task_stop_comp;
@@ -518,20 +515,17 @@ struct se_cmd {
/* backend private data */
void *priv;
- /* Used for lun->lun_ref counting */
- int lun_ref_active;
-
/* DIF related members */
enum target_prot_op prot_op;
enum target_prot_type prot_type;
u8 prot_checks;
+ bool prot_pto;
u32 prot_length;
u32 reftag_seed;
struct scatterlist *t_prot_sg;
unsigned int t_prot_nents;
sense_reason_t pi_err;
sector_t bad_sector;
- bool prot_pto;
};
struct se_ua {
@@ -598,7 +592,6 @@ struct se_ml_stat_grps {
};
struct se_lun_acl {
- char initiatorname[TRANSPORT_IQN_LEN];
u64 mapped_lun;
struct se_node_acl *se_lun_nacl;
struct se_lun *se_lun;
@@ -685,7 +678,6 @@ struct se_lun {
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
u32 lun_access;
- u32 lun_flags;
u32 lun_index;
/* RELATIVE TARGET PORT IDENTIFER */
@@ -738,6 +730,7 @@ struct se_device {
#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
#define DF_USING_UDEV_PATH 0x00000008
#define DF_USING_ALIAS 0x00000010
+#define DF_READ_ONLY 0x00000020
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
@@ -751,7 +744,6 @@ struct se_device {
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
- atomic_t dev_ordered_id;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
u32 export_count;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 18afef91b..7fb2557a7 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -5,6 +5,19 @@ struct target_core_fabric_ops {
struct module *module;
const char *name;
size_t node_acl_size;
+ /*
+ * Limits the number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
+ * Setting this value tells target-core to enforce this limit, and to
+ * report it as the INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
+ *
+ * target-core will currently reset se_cmd->data_length to this
+ * maximum size, and set UNDERFLOW residual count if length exceeds
+ * this limit.
+ *
+ * XXX: Not all initiator hosts honor this block-limit EVPD
+ * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
+ */
+ u32 max_data_sg_nents;
char *(*get_fabric_name)(void);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
@@ -152,6 +165,7 @@ int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
+void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
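A sketch of a fabric module opting into the new limit; assuming 4 KiB pages, 256 entries caps I/O at 1 MiB, which target-core then reports as the EVPD 0xb0 MAXIMUM TRANSFER LENGTH (the mandatory callbacks are omitted here):

#include <linux/module.h>
#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops example_fabric_ops = {
	.module			= THIS_MODULE,
	.name			= "example_fabric",
	/* one PAGE_SIZE per scatterlist entry is assumed, see comment above */
	.max_data_sg_nents	= 256,
	/* ... get_fabric_name, tpg_get_wwn, etc. would go here ... */
};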
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d96..317a1ed2f 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
#include <linux/tracepoint.h>
#define DAPM_DIRECT "(direct)"
+#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
struct snd_soc_jack;
struct snd_soc_codec;
@@ -152,62 +153,38 @@ TRACE_EVENT(snd_soc_dapm_walk_done,
(int)__entry->path_checks, (int)__entry->neighbour_checks)
);
-TRACE_EVENT(snd_soc_dapm_output_path,
+TRACE_EVENT(snd_soc_dapm_path,
TP_PROTO(struct snd_soc_dapm_widget *widget,
+ enum snd_soc_dapm_direction dir,
struct snd_soc_dapm_path *path),
- TP_ARGS(widget, path),
+ TP_ARGS(widget, dir, path),
TP_STRUCT__entry(
__string( wname, widget->name )
__string( pname, path->name ? path->name : DAPM_DIRECT)
- __string( psname, path->sink->name )
- __field( int, path_sink )
+ __string( pnname, path->node[dir]->name )
+ __field( int, path_node )
__field( int, path_connect )
+ __field( int, path_dir )
),
TP_fast_assign(
__assign_str(wname, widget->name);
__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
- __assign_str(psname, path->sink->name);
+ __assign_str(pnname, path->node[dir]->name);
__entry->path_connect = path->connect;
- __entry->path_sink = (long)path->sink;
+ __entry->path_node = (long)path->node[dir];
+ __entry->path_dir = dir;
),
- TP_printk("%c%s -> %s -> %s",
- (int) __entry->path_sink &&
+ TP_printk("%c%s %s %s %s %s",
+ (int) __entry->path_node &&
(int) __entry->path_connect ? '*' : ' ',
- __get_str(wname), __get_str(pname), __get_str(psname))
-);
-
-TRACE_EVENT(snd_soc_dapm_input_path,
-
- TP_PROTO(struct snd_soc_dapm_widget *widget,
- struct snd_soc_dapm_path *path),
-
- TP_ARGS(widget, path),
-
- TP_STRUCT__entry(
- __string( wname, widget->name )
- __string( pname, path->name ? path->name : DAPM_DIRECT)
- __string( psname, path->source->name )
- __field( int, path_source )
- __field( int, path_connect )
- ),
-
- TP_fast_assign(
- __assign_str(wname, widget->name);
- __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
- __assign_str(psname, path->source->name);
- __entry->path_connect = path->connect;
- __entry->path_source = (long)path->source;
- ),
-
- TP_printk("%c%s <- %s <- %s",
- (int) __entry->path_source &&
- (int) __entry->path_connect ? '*' : ' ',
- __get_str(wname), __get_str(pname), __get_str(psname))
+ __get_str(wname), DAPM_ARROW(__entry->path_dir),
+ __get_str(pname), DAPM_ARROW(__entry->path_dir),
+ __get_str(pnname))
);
TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d281..000000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ext3
-
-#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT3_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ext3_free_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( uid_t, uid )
- __field( gid_t, gid )
- __field( blkcnt_t, blocks )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->uid = i_uid_read(inode);
- __entry->gid = i_gid_read(inode);
- __entry->blocks = inode->i_blocks;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->uid, __entry->gid,
- (unsigned long) __entry->blocks)
-);
-
-TRACE_EVENT(ext3_request_inode,
- TP_PROTO(struct inode *dir, int mode),
-
- TP_ARGS(dir, mode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, dir )
- __field( umode_t, mode )
- ),
-
- TP_fast_assign(
- __entry->dev = dir->i_sb->s_dev;
- __entry->dir = dir->i_ino;
- __entry->mode = mode;
- ),
-
- TP_printk("dev %d,%d dir %lu mode 0%o",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_allocate_inode,
- TP_PROTO(struct inode *inode, struct inode *dir, int mode),
-
- TP_ARGS(inode, dir, mode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, dir )
- __field( umode_t, mode )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->dir = dir->i_ino;
- __entry->mode = mode;
- ),
-
- TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_evict_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( int, nlink )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->nlink = inode->i_nlink;
- ),
-
- TP_printk("dev %d,%d ino %lu nlink %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->nlink)
-);
-
-TRACE_EVENT(ext3_drop_inode,
- TP_PROTO(struct inode *inode, int drop),
-
- TP_ARGS(inode, drop),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( int, drop )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->drop = drop;
- ),
-
- TP_printk("dev %d,%d ino %lu drop %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->drop)
-);
-
-TRACE_EVENT(ext3_mark_inode_dirty,
- TP_PROTO(struct inode *inode, unsigned long IP),
-
- TP_ARGS(inode, IP),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field(unsigned long, ip )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->ip = IP;
- ),
-
- TP_printk("dev %d,%d ino %lu caller %pS",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext3_write_begin,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
-
- TP_ARGS(inode, pos, len, flags),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned int, len )
- __field( unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = pos;
- __entry->len = len;
- __entry->flags = flags;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(ext3__write_end,
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, pos )
- __field( unsigned int, len )
- __field( unsigned int, copied )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->pos = pos;
- __entry->len = len;
- __entry->copied = copied;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
-
- TP_ARGS(inode, pos, len, copied)
-);
-
-DECLARE_EVENT_CLASS(ext3__page_op,
- TP_PROTO(struct page *page),
-
- TP_ARGS(page),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( pgoff_t, index )
-
- ),
-
- TP_fast_assign(
- __entry->index = page->index;
- __entry->ino = page->mapping->host->i_ino;
- __entry->dev = page->mapping->host->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu page_index %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->index)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_readpage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_releasepage,
-
- TP_PROTO(struct page *page),
-
- TP_ARGS(page)
-);
-
-TRACE_EVENT(ext3_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
- TP_ARGS(page, offset, length),
-
- TP_STRUCT__entry(
- __field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
- __field( ino_t, ino )
- __field( dev_t, dev )
-
- ),
-
- TP_fast_assign(
- __entry->index = page->index;
- __entry->offset = offset;
- __entry->length = length;
- __entry->ino = page->mapping->host->i_ino;
- __entry->dev = page->mapping->host->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->index, __entry->offset, __entry->length)
-);
-
-TRACE_EVENT(ext3_discard_blocks,
- TP_PROTO(struct super_block *sb, unsigned long blk,
- unsigned long count),
-
- TP_ARGS(sb, blk, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( unsigned long, blk )
- __field( unsigned long, count )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->blk = blk;
- __entry->count = count;
- ),
-
- TP_printk("dev %d,%d blk %lu count %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blk, __entry->count)
-);
-
-TRACE_EVENT(ext3_request_blocks,
- TP_PROTO(struct inode *inode, unsigned long goal,
- unsigned long count),
-
- TP_ARGS(inode, goal, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( unsigned long, count )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->count = count;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->count, __entry->goal)
-);
-
-TRACE_EVENT(ext3_allocate_blocks,
- TP_PROTO(struct inode *inode, unsigned long goal,
- unsigned long count, unsigned long block),
-
- TP_ARGS(inode, goal, count, block),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( unsigned long, block )
- __field( unsigned long, count )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->block = block;
- __entry->count = count;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->count, __entry->block,
- __entry->goal)
-);
-
-TRACE_EVENT(ext3_free_blocks,
- TP_PROTO(struct inode *inode, unsigned long block,
- unsigned long count),
-
- TP_ARGS(inode, block, count),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( unsigned long, block )
- __field( unsigned long, count )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->block = block;
- __entry->count = count;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->block, __entry->count)
-);
-
-TRACE_EVENT(ext3_sync_file_enter,
- TP_PROTO(struct file *file, int datasync),
-
- TP_ARGS(file, datasync),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, parent )
- __field( int, datasync )
- ),
-
- TP_fast_assign(
- struct dentry *dentry = file->f_path.dentry;
-
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->datasync = datasync;
- __entry->parent = d_inode(dentry->d_parent)->i_ino;
- ),
-
- TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->parent, __entry->datasync)
-);
-
-TRACE_EVENT(ext3_sync_file_exit,
- TP_PROTO(struct inode *inode, int ret),
-
- TP_ARGS(inode, ret),
-
- TP_STRUCT__entry(
- __field( int, ret )
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->ret = ret;
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->ret)
-);
-
-TRACE_EVENT(ext3_sync_fs,
- TP_PROTO(struct super_block *sb, int wait),
-
- TP_ARGS(sb, wait),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, wait )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->wait = wait;
- ),
-
- TP_printk("dev %d,%d wait %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait)
-);
-
-TRACE_EVENT(ext3_rsv_window_add,
- TP_PROTO(struct super_block *sb,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(sb, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- ),
-
- TP_printk("dev %d,%d start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_discard_reservation,
- TP_PROTO(struct inode *inode,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(inode, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long)__entry->ino, __entry->start,
- __entry->end)
-);
-
-TRACE_EVENT(ext3_alloc_new_reservation,
- TP_PROTO(struct super_block *sb, unsigned long goal),
-
- TP_ARGS(sb, goal),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( unsigned long, goal )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->goal = goal;
- ),
-
- TP_printk("dev %d,%d goal %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->goal)
-);
-
-TRACE_EVENT(ext3_reserved,
- TP_PROTO(struct super_block *sb, unsigned long block,
- struct ext3_reserve_window_node *rsv_node),
-
- TP_ARGS(sb, block, rsv_node),
-
- TP_STRUCT__entry(
- __field( unsigned long, block )
- __field( unsigned long, start )
- __field( unsigned long, end )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->block = block;
- __entry->start = rsv_node->rsv_window._rsv_start;
- __entry->end = rsv_node->rsv_window._rsv_end;
- __entry->dev = sb->s_dev;
- ),
-
- TP_printk("dev %d,%d block %lu, start %lu end %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->block, __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_forget,
- TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
-
- TP_ARGS(inode, is_metadata, block),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( umode_t, mode )
- __field( int, is_metadata )
- __field( unsigned long, block )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->mode = inode->i_mode;
- __entry->is_metadata = is_metadata;
- __entry->block = block;
- ),
-
- TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->mode, __entry->is_metadata, __entry->block)
-);
-
-TRACE_EVENT(ext3_read_block_bitmap,
- TP_PROTO(struct super_block *sb, unsigned int group),
-
- TP_ARGS(sb, group),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( __u32, group )
-
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->group = group;
- ),
-
- TP_printk("dev %d,%d group %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->group)
-);
-
-TRACE_EVENT(ext3_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
- TP_ARGS(inode, offset, len, rw),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->rw)
-);
-
-TRACE_EVENT(ext3_direct_IO_exit,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
- int rw, int ret),
-
- TP_ARGS(inode, offset, len, rw, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( loff_t, pos )
- __field( unsigned long, len )
- __field( int, rw )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->pos = offset;
- __entry->len = len;
- __entry->rw = rw;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long) __entry->pos, __entry->len,
- __entry->rw, __entry->ret)
-);
-
-TRACE_EVENT(ext3_unlink_enter,
- TP_PROTO(struct inode *parent, struct dentry *dentry),
-
- TP_ARGS(parent, dentry),
-
- TP_STRUCT__entry(
- __field( ino_t, parent )
- __field( ino_t, ino )
- __field( loff_t, size )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->parent = parent->i_ino;
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->size = d_inode(dentry)->i_size;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu size %lld parent %ld",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long long)__entry->size,
- (unsigned long) __entry->parent)
-);
-
-TRACE_EVENT(ext3_unlink_exit,
- TP_PROTO(struct dentry *dentry, int ret),
-
- TP_ARGS(dentry, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = d_inode(dentry)->i_ino;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->ret)
-);
-
-DECLARE_EVENT_CLASS(ext3__truncate,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( blkcnt_t, blocks )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->blocks = inode->i_blocks;
- ),
-
- TP_printk("dev %d,%d ino %lu blocks %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
-
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
-
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
-TRACE_EVENT(ext3_get_blocks_enter,
- TP_PROTO(struct inode *inode, unsigned long lblk,
- unsigned long len, int create),
-
- TP_ARGS(inode, lblk, len, create),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( unsigned long, lblk )
- __field( unsigned long, len )
- __field( int, create )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->lblk = lblk;
- __entry->len = len;
- __entry->create = create;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->lblk, __entry->len, __entry->create)
-);
-
-TRACE_EVENT(ext3_get_blocks_exit,
- TP_PROTO(struct inode *inode, unsigned long lblk,
- unsigned long pblk, unsigned long len, int ret),
-
- TP_ARGS(inode, lblk, pblk, len, ret),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- __field( unsigned long, lblk )
- __field( unsigned long, pblk )
- __field( unsigned long, len )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- __entry->lblk = lblk;
- __entry->pblk = pblk;
- __entry->len = len;
- __entry->ret = ret;
- ),
-
- TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- __entry->lblk, __entry->pblk,
- __entry->len, __entry->ret)
-);
-
-TRACE_EVENT(ext3_load_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( ino_t, ino )
- __field( dev_t, dev )
- ),
-
- TP_fast_assign(
- __entry->ino = inode->i_ino;
- __entry->dev = inode->i_sb->s_dev;
- ),
-
- TP_printk("dev %d,%d ino %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
-);
-
-#endif /* _TRACE_EXT3_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2d8..a01946514 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
TP_PROTO(struct inode *inode, unsigned int pgofs,
- struct extent_node *en),
+ struct extent_info *ei),
- TP_ARGS(inode, pgofs, en),
+ TP_ARGS(inode, pgofs, ei),
- TP_CONDITION(en),
+ TP_CONDITION(ei),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1118,9 +1118,9 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
- __entry->fofs = en->ei.fofs;
- __entry->blk = en->ei.blk;
- __entry->len = en->ei.len;
+ __entry->fofs = ei->fofs;
+ __entry->blk = ei->blk;
+ __entry->len = ei->len;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
new file mode 100644
index 000000000..833cfcb67
--- /dev/null
+++ b/include/trace/events/fib.h
@@ -0,0 +1,113 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fib
+
+#if !defined(_TRACE_FIB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FIB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/ip_fib.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fib_table_lookup,
+
+ TP_PROTO(u32 tb_id, const struct flowi4 *flp),
+
+ TP_ARGS(tb_id, flp),
+
+ TP_STRUCT__entry(
+ __field( u32, tb_id )
+ __field( int, oif )
+ __field( int, iif )
+ __field( __u8, tos )
+ __field( __u8, scope )
+ __field( __u8, flags )
+ __array( __u8, src, 4 )
+ __array( __u8, dst, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32;
+
+ __entry->tb_id = tb_id;
+ __entry->oif = flp->flowi4_oif;
+ __entry->iif = flp->flowi4_iif;
+ __entry->tos = flp->flowi4_tos;
+ __entry->scope = flp->flowi4_scope;
+ __entry->flags = flp->flowi4_flags;
+
+ p32 = (__be32 *) __entry->src;
+ *p32 = flp->saddr;
+
+ p32 = (__be32 *) __entry->dst;
+ *p32 = flp->daddr;
+ ),
+
+ TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
+ __entry->tb_id, __entry->oif, __entry->iif,
+ __entry->src, __entry->dst, __entry->tos, __entry->scope,
+ __entry->flags)
+);
+
+TRACE_EVENT(fib_table_lookup_nh,
+
+ TP_PROTO(const struct fib_nh *nh),
+
+ TP_ARGS(nh),
+
+ TP_STRUCT__entry(
+ __string( name, nh->nh_dev->name)
+ __field( int, oif )
+ __array( __u8, src, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32 = (__be32 *) __entry->src;
+
+ __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
+ __entry->oif = nh->nh_oif;
+ *p32 = nh->nh_saddr;
+ ),
+
+ TP_printk("nexthop dev %s oif %d src %pI4",
+ __get_str(name), __entry->oif, __entry->src)
+);
+
+TRACE_EVENT(fib_validate_source,
+
+ TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
+
+ TP_ARGS(dev, flp),
+
+ TP_STRUCT__entry(
+ __string( name, dev->name )
+ __field( int, oif )
+ __field( int, iif )
+ __field( __u8, tos )
+ __array( __u8, src, 4 )
+ __array( __u8, dst, 4 )
+ ),
+
+ TP_fast_assign(
+ __be32 *p32;
+
+ __assign_str(name, dev ? dev->name : "not set");
+ __entry->oif = flp->flowi4_oif;
+ __entry->iif = flp->flowi4_iif;
+ __entry->tos = flp->flowi4_tos;
+
+ p32 = (__be32 *) __entry->src;
+ *p32 = flp->saddr;
+
+ p32 = (__be32 *) __entry->dst;
+ *p32 = flp->daddr;
+ ),
+
+ TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
+ __get_str(name), __entry->oif, __entry->iif, __entry->tos,
+ __entry->src, __entry->dst)
+);
+#endif /* _TRACE_FIB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
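A hypothetical call-site sketch for the new tracepoints; the header must be included with CREATE_TRACE_POINTS defined in exactly one compilation unit:

#define CREATE_TRACE_POINTS
#include <trace/events/fib.h>

static void example_trace_lookup(u32 tb_id, const struct flowi4 *flp,
				 const struct fib_nh *nh)
{
	trace_fib_table_lookup(tb_id, flp);
	if (nh)
		trace_fib_table_lookup_nh(nh);
}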
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f2591c..000000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd
-
-#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_JBD_H
-
-#include <linux/jbd.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(jbd_checkpoint,
-
- TP_PROTO(journal_t *journal, int result),
-
- TP_ARGS(journal, result),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, result )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->result = result;
- ),
-
- TP_printk("dev %d,%d result %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->result)
-);
-
-DECLARE_EVENT_CLASS(jbd_commit,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_start_commit,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_locking,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_logging,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction)
-);
-
-TRACE_EVENT(jbd_drop_transaction,
-
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-TRACE_EVENT(jbd_end_commit,
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- __field( int, head )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- __entry->head = journal->j_tail_sequence;
- ),
-
- TP_printk("dev %d,%d transaction %d head %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->head)
-);
-
-TRACE_EVENT(jbd_do_submit_data,
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
- TP_ARGS(journal, commit_transaction),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, transaction )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->transaction = commit_transaction->t_tid;
- ),
-
- TP_printk("dev %d,%d transaction %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction)
-);
-
-TRACE_EVENT(jbd_cleanup_journal_tail,
-
- TP_PROTO(journal_t *journal, tid_t first_tid,
- unsigned long block_nr, unsigned long freed),
-
- TP_ARGS(journal, first_tid, block_nr, freed),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( tid_t, tail_sequence )
- __field( tid_t, first_tid )
- __field(unsigned long, block_nr )
- __field(unsigned long, freed )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->tail_sequence = journal->j_tail_sequence;
- __entry->first_tid = first_tid;
- __entry->block_nr = block_nr;
- __entry->freed = freed;
- ),
-
- TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->tail_sequence, __entry->first_tid,
- __entry->block_nr, __entry->freed)
-);
-
-TRACE_EVENT(journal_write_superblock,
- TP_PROTO(journal_t *journal, int write_op),
-
- TP_ARGS(journal, write_op),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, write_op )
- ),
-
- TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
- __entry->write_op = write_op;
- ),
-
- TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->write_op)
-);
-
-#endif /* _TRACE_JBD_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index a44062da6..d6f83222a 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -358,6 +358,36 @@ TRACE_EVENT(
#endif
+TRACE_EVENT(kvm_halt_poll_ns,
+ TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+ TP_ARGS(grow, vcpu_id, new, old),
+
+ TP_STRUCT__entry(
+ __field(bool, grow)
+ __field(unsigned int, vcpu_id)
+ __field(int, new)
+ __field(int, old)
+ ),
+
+ TP_fast_assign(
+ __entry->grow = grow;
+ __entry->vcpu_id = vcpu_id;
+ __entry->new = new;
+ __entry->old = old;
+ ),
+
+ TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+ __entry->vcpu_id,
+ __entry->new,
+ __entry->grow ? "grow" : "shrink",
+ __entry->old)
+);
+
+#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
+#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
+ trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
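A sketch of how the grow path might use the wrapper; the doubling policy and the initial value are placeholders, not the actual KVM heuristics:

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

static void example_grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	int old_ns = vcpu->halt_poll_ns;
	int new_ns = old_ns ? old_ns * 2 : 10000;	/* placeholder policy */

	vcpu->halt_poll_ns = new_ns;
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, new_ns, old_ns);
}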
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index c78e88ce5..ef72c4aad 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -661,7 +661,6 @@ TRACE_EVENT(rcu_torture_read,
* Tracepoint for _rcu_barrier() execution. The string "s" describes
* the _rcu_barrier phase:
* "Begin": _rcu_barrier() started.
- * "Check": _rcu_barrier() checking for piggybacking.
* "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
* "Inc1": _rcu_barrier() piggyback check counter incremented.
* "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d57a575fe..539d6bc32 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
- TP_PROTO(struct task_struct *p, int success),
+ TP_PROTO(struct task_struct *p),
- TP_ARGS(__perf_task(p), success),
+ TP_ARGS(__perf_task(p)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
- __entry->success = success;
+ __entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
- TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+ TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success, __entry->target_cpu)
+ __entry->target_cpu)
);
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_waking,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success));
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success));
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
new file mode 100644
index 000000000..62f005ef4
--- /dev/null
+++ b/include/trace/events/spmi.h
@@ -0,0 +1,135 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spmi
+
+#if !defined(_TRACE_SPMI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPMI_H
+
+#include <linux/spmi.h>
+#include <linux/tracepoint.h>
+
+/*
+ * drivers/spmi/spmi.c
+ */
+
+TRACE_EVENT(spmi_write_begin,
+ TP_PROTO(u8 opcode, u8 sid, u16 addr, u8 len, const u8 *buf),
+ TP_ARGS(opcode, sid, addr, len, buf),
+
+ TP_STRUCT__entry(
+ __field ( u8, opcode )
+ __field ( u8, sid )
+ __field ( u16, addr )
+ __field ( u8, len )
+ __dynamic_array ( u8, buf, len + 1 )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->sid = sid;
+ __entry->addr = addr;
+ __entry->len = len + 1;
+ memcpy(__get_dynamic_array(buf), buf, len + 1);
+ ),
+
+ TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
+ (int)__entry->opcode, (int)__entry->sid,
+ (int)__entry->addr, (int)__entry->len,
+ (int)__entry->len, __get_dynamic_array(buf))
+);
+
+TRACE_EVENT(spmi_write_end,
+ TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret),
+ TP_ARGS(opcode, sid, addr, ret),
+
+ TP_STRUCT__entry(
+ __field ( u8, opcode )
+ __field ( u8, sid )
+ __field ( u16, addr )
+ __field ( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->sid = sid;
+ __entry->addr = addr;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d",
+ (int)__entry->opcode, (int)__entry->sid,
+ (int)__entry->addr, __entry->ret)
+);
+
+TRACE_EVENT(spmi_read_begin,
+ TP_PROTO(u8 opcode, u8 sid, u16 addr),
+ TP_ARGS(opcode, sid, addr),
+
+ TP_STRUCT__entry(
+ __field ( u8, opcode )
+ __field ( u8, sid )
+ __field ( u16, addr )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->sid = sid;
+ __entry->addr = addr;
+ ),
+
+ TP_printk("opc=%d sid=%02d addr=0x%04x",
+ (int)__entry->opcode, (int)__entry->sid,
+ (int)__entry->addr)
+);
+
+TRACE_EVENT(spmi_read_end,
+ TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret, u8 len, const u8 *buf),
+ TP_ARGS(opcode, sid, addr, ret, len, buf),
+
+ TP_STRUCT__entry(
+ __field ( u8, opcode )
+ __field ( u8, sid )
+ __field ( u16, addr )
+ __field ( int, ret )
+ __field ( u8, len )
+ __dynamic_array ( u8, buf, len + 1 )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->sid = sid;
+ __entry->addr = addr;
+ __entry->ret = ret;
+ __entry->len = len + 1;
+ memcpy(__get_dynamic_array(buf), buf, len + 1);
+ ),
+
+ TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
+ (int)__entry->opcode, (int)__entry->sid,
+ (int)__entry->addr, __entry->ret, (int)__entry->len,
+ (int)__entry->len, __get_dynamic_array(buf))
+);
+
+TRACE_EVENT(spmi_cmd,
+ TP_PROTO(u8 opcode, u8 sid, int ret),
+ TP_ARGS(opcode, sid, ret),
+
+ TP_STRUCT__entry(
+ __field ( u8, opcode )
+ __field ( u8, sid )
+ __field ( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->sid = sid;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("opc=%d sid=%02d ret=%d", (int)__entry->opcode,
+ (int)__entry->sid, ret)
+);
+
+#endif /* _TRACE_SPMI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
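These events are meant to be emitted by the SPMI core around register transfers. The sketch below shows how a read path could wrap them; the helper name and call site are assumptions for illustration, not the actual drivers/spmi/spmi.c code. Note that the event definitions above record len + 1 bytes, following the SPMI convention where len is the byte count minus one.

	#define CREATE_TRACE_POINTS	/* exactly one .c file defines this */
	#include <trace/events/spmi.h>

	/* Illustrative read path: trace entry, perform the transfer, trace exit.
	 * Per the events above, 'len' is the transfer length minus one. */
	static int example_spmi_read(struct spmi_controller *ctrl, u8 opc, u8 sid,
				     u16 addr, u8 *buf, u8 len)
	{
		int ret;

		trace_spmi_read_begin(opc, sid, addr);
		ret = ctrl->read_cmd(ctrl, opc, sid, addr, buf, len);
		trace_spmi_read_end(opc, sid, addr, ret, len, buf);
		return ret;
	}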
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index dee3bb1d5..2cca6cd34 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -46,7 +46,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- memcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
index 12e1321c4..5afae8fe3 100644
--- a/include/trace/events/thermal_power_allocator.h
+++ b/include/trace/events/thermal_power_allocator.h
@@ -11,7 +11,7 @@ TRACE_EVENT(thermal_power_allocator,
u32 total_req_power, u32 *granted_power,
u32 total_granted_power, size_t num_actors,
u32 power_range, u32 max_allocatable_power,
- unsigned long current_temp, s32 delta_temp),
+ int current_temp, s32 delta_temp),
TP_ARGS(tz, req_power, total_req_power, granted_power,
total_granted_power, num_actors, power_range,
max_allocatable_power, current_temp, delta_temp),
@@ -24,7 +24,7 @@ TRACE_EVENT(thermal_power_allocator,
__field(size_t, num_actors )
__field(u32, power_range )
__field(u32, max_allocatable_power )
- __field(unsigned long, current_temp )
+ __field(int, current_temp )
__field(s32, delta_temp )
),
TP_fast_assign(
@@ -42,7 +42,7 @@ TRACE_EVENT(thermal_power_allocator,
__entry->delta_temp = delta_temp;
),
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d",
+ TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
__entry->tz_id,
__print_array(__get_dynamic_array(req_power),
__entry->num_actors, 4),
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 4250f364a..bc8815f45 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -11,7 +11,8 @@
EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
- EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+ EM( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" ) \
+ EMe( TLB_REMOTE_SEND_IPI, "remote ipi send" )
/*
* First define the enums in TLB_FLUSH_REASON to be exported to userspace
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 89d0497c0..dbf017bfd 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -93,90 +93,183 @@ SHOW_FIELD
{ V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \
{ V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" })
-#define V4L2_TRACE_EVENT(event_name) \
- TRACE_EVENT(event_name, \
- TP_PROTO(int minor, struct v4l2_buffer *buf), \
- \
- TP_ARGS(minor, buf), \
- \
- TP_STRUCT__entry( \
- __field(int, minor) \
- __field(u32, index) \
- __field(u32, type) \
- __field(u32, bytesused) \
- __field(u32, flags) \
- __field(u32, field) \
- __field(s64, timestamp) \
- __field(u32, timecode_type) \
- __field(u32, timecode_flags) \
- __field(u8, timecode_frames) \
- __field(u8, timecode_seconds) \
- __field(u8, timecode_minutes) \
- __field(u8, timecode_hours) \
- __field(u8, timecode_userbits0) \
- __field(u8, timecode_userbits1) \
- __field(u8, timecode_userbits2) \
- __field(u8, timecode_userbits3) \
- __field(u32, sequence) \
- ), \
- \
- TP_fast_assign( \
- __entry->minor = minor; \
- __entry->index = buf->index; \
- __entry->type = buf->type; \
- __entry->bytesused = buf->bytesused; \
- __entry->flags = buf->flags; \
- __entry->field = buf->field; \
- __entry->timestamp = \
- timeval_to_ns(&buf->timestamp); \
- __entry->timecode_type = buf->timecode.type; \
- __entry->timecode_flags = buf->timecode.flags; \
- __entry->timecode_frames = \
- buf->timecode.frames; \
- __entry->timecode_seconds = \
- buf->timecode.seconds; \
- __entry->timecode_minutes = \
- buf->timecode.minutes; \
- __entry->timecode_hours = buf->timecode.hours; \
- __entry->timecode_userbits0 = \
- buf->timecode.userbits[0]; \
- __entry->timecode_userbits1 = \
- buf->timecode.userbits[1]; \
- __entry->timecode_userbits2 = \
- buf->timecode.userbits[2]; \
- __entry->timecode_userbits3 = \
- buf->timecode.userbits[3]; \
- __entry->sequence = buf->sequence; \
- ), \
- \
- TP_printk("minor = %d, index = %u, type = %s, " \
- "bytesused = %u, flags = %s, " \
- "field = %s, timestamp = %llu, timecode = { " \
- "type = %s, flags = %s, frames = %u, " \
- "seconds = %u, minutes = %u, hours = %u, " \
- "userbits = { %u %u %u %u } }, " \
- "sequence = %u", __entry->minor, \
- __entry->index, show_type(__entry->type), \
- __entry->bytesused, \
- show_flags(__entry->flags), \
- show_field(__entry->field), \
- __entry->timestamp, \
- show_timecode_type(__entry->timecode_type), \
- show_timecode_flags(__entry->timecode_flags), \
- __entry->timecode_frames, \
- __entry->timecode_seconds, \
- __entry->timecode_minutes, \
- __entry->timecode_hours, \
- __entry->timecode_userbits0, \
- __entry->timecode_userbits1, \
- __entry->timecode_userbits2, \
- __entry->timecode_userbits3, \
- __entry->sequence \
- ) \
+DECLARE_EVENT_CLASS(v4l2_event_class,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+
+ TP_ARGS(minor, buf),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ __field(u32, flags)
+ __field(u32, field)
+ __field(s64, timestamp)
+ __field(u32, timecode_type)
+ __field(u32, timecode_flags)
+ __field(u8, timecode_frames)
+ __field(u8, timecode_seconds)
+ __field(u8, timecode_minutes)
+ __field(u8, timecode_hours)
+ __field(u8, timecode_userbits0)
+ __field(u8, timecode_userbits1)
+ __field(u8, timecode_userbits2)
+ __field(u8, timecode_userbits3)
+ __field(u32, sequence)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = minor;
+ __entry->index = buf->index;
+ __entry->type = buf->type;
+ __entry->bytesused = buf->bytesused;
+ __entry->flags = buf->flags;
+ __entry->field = buf->field;
+ __entry->timestamp = timeval_to_ns(&buf->timestamp);
+ __entry->timecode_type = buf->timecode.type;
+ __entry->timecode_flags = buf->timecode.flags;
+ __entry->timecode_frames = buf->timecode.frames;
+ __entry->timecode_seconds = buf->timecode.seconds;
+ __entry->timecode_minutes = buf->timecode.minutes;
+ __entry->timecode_hours = buf->timecode.hours;
+ __entry->timecode_userbits0 = buf->timecode.userbits[0];
+ __entry->timecode_userbits1 = buf->timecode.userbits[1];
+ __entry->timecode_userbits2 = buf->timecode.userbits[2];
+ __entry->timecode_userbits3 = buf->timecode.userbits[3];
+ __entry->sequence = buf->sequence;
+ ),
+
+ TP_printk("minor = %d, index = %u, type = %s, bytesused = %u, "
+ "flags = %s, field = %s, timestamp = %llu, "
+ "timecode = { type = %s, flags = %s, frames = %u, "
+ "seconds = %u, minutes = %u, hours = %u, "
+ "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+ __entry->index, show_type(__entry->type),
+ __entry->bytesused,
+ show_flags(__entry->flags),
+ show_field(__entry->field),
+ __entry->timestamp,
+ show_timecode_type(__entry->timecode_type),
+ show_timecode_flags(__entry->timecode_flags),
+ __entry->timecode_frames,
+ __entry->timecode_seconds,
+ __entry->timecode_minutes,
+ __entry->timecode_hours,
+ __entry->timecode_userbits0,
+ __entry->timecode_userbits1,
+ __entry->timecode_userbits2,
+ __entry->timecode_userbits3,
+ __entry->sequence
+ )
+)
+
+DEFINE_EVENT(v4l2_event_class, v4l2_dqbuf,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+ TP_ARGS(minor, buf)
+);
+
+DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
+ TP_PROTO(int minor, struct v4l2_buffer *buf),
+ TP_ARGS(minor, buf)
+);
+
+DECLARE_EVENT_CLASS(vb2_event_class,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(u32, queued_count)
+ __field(int, owned_by_drv_count)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ __field(u32, flags)
+ __field(u32, field)
+ __field(s64, timestamp)
+ __field(u32, timecode_type)
+ __field(u32, timecode_flags)
+ __field(u8, timecode_frames)
+ __field(u8, timecode_seconds)
+ __field(u8, timecode_minutes)
+ __field(u8, timecode_hours)
+ __field(u8, timecode_userbits0)
+ __field(u8, timecode_userbits1)
+ __field(u8, timecode_userbits2)
+ __field(u8, timecode_userbits3)
+ __field(u32, sequence)
+ ),
+
+ TP_fast_assign(
+ __entry->minor = q->owner ? q->owner->vdev->minor : -1;
+ __entry->queued_count = q->queued_count;
+ __entry->owned_by_drv_count =
+ atomic_read(&q->owned_by_drv_count);
+ __entry->index = vb->v4l2_buf.index;
+ __entry->type = vb->v4l2_buf.type;
+ __entry->bytesused = vb->v4l2_planes[0].bytesused;
+ __entry->flags = vb->v4l2_buf.flags;
+ __entry->field = vb->v4l2_buf.field;
+ __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
+ __entry->timecode_type = vb->v4l2_buf.timecode.type;
+ __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
+ __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
+ __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
+ __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
+ __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
+ __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
+ __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
+ __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
+ __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
+ __entry->sequence = vb->v4l2_buf.sequence;
+ ),
+
+ TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
+ "type = %s, bytesused = %u, flags = %s, field = %s, "
+ "timestamp = %llu, timecode = { type = %s, flags = %s, "
+ "frames = %u, seconds = %u, minutes = %u, hours = %u, "
+ "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
+ __entry->queued_count,
+ __entry->owned_by_drv_count,
+ __entry->index, show_type(__entry->type),
+ __entry->bytesused,
+ show_flags(__entry->flags),
+ show_field(__entry->field),
+ __entry->timestamp,
+ show_timecode_type(__entry->timecode_type),
+ show_timecode_flags(__entry->timecode_flags),
+ __entry->timecode_frames,
+ __entry->timecode_seconds,
+ __entry->timecode_minutes,
+ __entry->timecode_hours,
+ __entry->timecode_userbits0,
+ __entry->timecode_userbits1,
+ __entry->timecode_userbits2,
+ __entry->timecode_userbits3,
+ __entry->sequence
)
+)
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
-V4L2_TRACE_EVENT(v4l2_dqbuf);
-V4L2_TRACE_EVENT(v4l2_qbuf);
+DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index a7aa607a4..fff846b51 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -131,6 +131,66 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
TP_ARGS(inode, flags)
);
+#ifdef CREATE_TRACE_POINTS
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+{
+ return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
+}
+
+static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+{
+ struct cgroup *cgrp = wb->memcg_css->cgroup;
+ char *path;
+
+ path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
+ WARN_ON_ONCE(path != buf);
+}
+
+static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+{
+ if (wbc->wb)
+ return __trace_wb_cgroup_size(wbc->wb);
+ else
+ return 2;
+}
+
+static inline void __trace_wbc_assign_cgroup(char *buf,
+ struct writeback_control *wbc)
+{
+ if (wbc->wb)
+ __trace_wb_assign_cgroup(buf, wbc->wb);
+ else
+ strcpy(buf, "/");
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+{
+ return 2;
+}
+
+static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+{
+ strcpy(buf, "/");
+}
+
+static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+{
+ return 2;
+}
+
+static inline void __trace_wbc_assign_cgroup(char *buf,
+ struct writeback_control *wbc)
+{
+ strcpy(buf, "/");
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+#endif /* CREATE_TRACE_POINTS */
+
DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
@@ -141,6 +201,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(int, sync_mode)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -148,12 +209,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
__entry->name,
__entry->ino,
- __entry->sync_mode
+ __entry->sync_mode,
+ __get_str(cgroup)
)
);
@@ -172,8 +235,8 @@ DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
);
DECLARE_EVENT_CLASS(writeback_work_class,
- TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
- TP_ARGS(bdi, work),
+ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
+ TP_ARGS(wb, work),
TP_STRUCT__entry(
__array(char, name, 32)
__field(long, nr_pages)
@@ -183,10 +246,11 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -194,9 +258,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -204,13 +269,14 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- __print_symbolic(__entry->reason, WB_WORK_REASON)
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+ __get_str(cgroup)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
- TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
- TP_ARGS(bdi, work))
+ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
+ TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -230,26 +296,42 @@ TRACE_EVENT(writeback_pages_written,
);
DECLARE_EVENT_CLASS(writeback_class,
- TP_PROTO(struct backing_dev_info *bdi),
- TP_ARGS(bdi),
+ TP_PROTO(struct bdi_writeback *wb),
+ TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
- TP_printk("bdi %s",
- __entry->name
+ TP_printk("bdi %s: cgroup=%s",
+ __entry->name,
+ __get_str(cgroup)
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
- TP_PROTO(struct backing_dev_info *bdi), \
- TP_ARGS(bdi))
+ TP_PROTO(struct bdi_writeback *wb), \
+ TP_ARGS(wb))
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+
+TRACE_EVENT(writeback_bdi_register,
+ TP_PROTO(struct backing_dev_info *bdi),
+ TP_ARGS(bdi),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ ),
+ TP_printk("bdi %s",
+ __entry->name
+ )
+);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -265,6 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -278,11 +361,12 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx",
+ "start=0x%lx end=0x%lx cgroup=%s",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -292,7 +376,9 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
- __entry->range_end)
+ __entry->range_end,
+ __get_str(cgroup)
+ )
)
#define DEFINE_WBC_EVENT(name) \
@@ -312,6 +398,7 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
@@ -321,13 +408,15 @@ TRACE_EVENT(writeback_queue_io,
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- __print_symbolic(__entry->reason, WB_WORK_REASON)
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+ __get_str(cgroup)
)
);
@@ -381,11 +470,11 @@ TRACE_EVENT(global_dirty_state,
TRACE_EVENT(bdi_dirty_ratelimit,
- TP_PROTO(struct backing_dev_info *bdi,
+ TP_PROTO(struct bdi_writeback *wb,
unsigned long dirty_rate,
unsigned long task_ratelimit),
- TP_ARGS(bdi, dirty_rate, task_ratelimit),
+ TP_ARGS(wb, dirty_rate, task_ratelimit),
TP_STRUCT__entry(
__array(char, bdi, 32)
@@ -395,36 +484,39 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->write_bw = KBps(bdi->wb.write_bandwidth);
- __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth);
+ strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ __entry->write_bw = KBps(wb->write_bandwidth);
+ __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
- __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
+ __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
- KBps(bdi->wb.balanced_dirty_ratelimit);
+ KBps(wb->balanced_dirty_ratelimit);
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu",
+ "balanced_dirty_ratelimit=%lu cgroup=%s",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
__entry->dirty_rate, /* bdi dirty rate */
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
- __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
+ __get_str(cgroup)
)
);
TRACE_EVENT(balance_dirty_pages,
- TP_PROTO(struct backing_dev_info *bdi,
+ TP_PROTO(struct bdi_writeback *wb,
unsigned long thresh,
unsigned long bg_thresh,
unsigned long dirty,
@@ -437,7 +529,7 @@ TRACE_EVENT(balance_dirty_pages,
long pause,
unsigned long start_time),
- TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
@@ -456,11 +548,12 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
),
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+ strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -478,6 +571,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
),
@@ -486,7 +580,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -500,7 +594,8 @@ TRACE_EVENT(balance_dirty_pages,
__entry->paused, /* ms */
__entry->pause, /* ms */
__entry->period, /* ms */
- __entry->think /* ms */
+ __entry->think, /* ms */
+ __get_str(cgroup)
)
);
@@ -514,6 +609,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
+ __dynamic_array(char, cgroup,
+ __trace_wb_cgroup_size(inode_to_wb(inode)))
),
TP_fast_assign(
@@ -522,14 +619,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
+ __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
- (jiffies - __entry->dirtied_when) / HZ
+ (jiffies - __entry->dirtied_when) / HZ,
+ __get_str(cgroup)
)
);
@@ -585,6 +684,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
+ __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
),
TP_fast_assign(
@@ -596,10 +696,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu",
+ "index=%lu to_write=%ld wrote=%lu cgroup=%s",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
@@ -607,7 +708,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
- __entry->wrote
+ __entry->wrote,
+ __get_str(cgroup)
)
);
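The cgroup helpers added above sit under CREATE_TRACE_POINTS because they are only needed in the one compilation unit that instantiates the events (where TP_STRUCT__entry and TP_fast_assign actually expand), which also keeps the cgroup internals out of every other includer of this header. A stripped-down sketch of the same sizing pattern, with hypothetical event and helper names:

	#ifdef CREATE_TRACE_POINTS
	/* Only needed where the events are instantiated. */
	static inline size_t __trace_example_path_size(struct kernfs_node *kn)
	{
		return kernfs_path_len(kn) + 1;	/* room for the trailing NUL */
	}
	#endif

	TRACE_EVENT(example_cgroup_event,
		TP_PROTO(struct kernfs_node *kn),
		TP_ARGS(kn),
		TP_STRUCT__entry(
			/* size expression evaluated per event at record time */
			__dynamic_array(char, path, __trace_example_path_size(kn))
		),
		TP_fast_assign(
			kernfs_path(kn, __get_str(path), __trace_example_path_size(kn));
		),
		TP_printk("path=%s", __get_str(path))
	);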
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 9df61f1ed..3094618d3 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -80,8 +80,10 @@
* SA_RESTORER 0x04000000
*/
+#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
+#endif
#ifndef __ASSEMBLY__
typedef struct {
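The new guard lets an architecture provide larger signal-stack minimums before pulling in the generic header. An illustrative, hypothetical arch uapi header would now look like this (the values are examples only):

	/* arch/<arch>/include/uapi/asm/signal.h -- illustrative only */
	#define MINSIGSTKSZ	5120	/* example per-arch sizes */
	#define SIGSTKSZ	16384

	#include <asm-generic/signal.h>	/* the #if guard keeps these overrides */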
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index e016bd9b1..ee124009e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,15 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
__SYSCALL(__NR_bpf, sys_bpf)
#define __NR_execveat 281
__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 282
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 283
+__SYSCALL(__NR_membarrier, sys_membarrier)
#undef __NR_syscalls
-#define __NR_syscalls 282
+#define __NR_syscalls 284
/*
* All syscalls below here should go away really,
* these are provided for both review and as a porting
* help for the C library version.
-*
+ *
* Last chance: are any of these important enough to
* enable by default?
*/
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cde6..8c5e8b91a 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+/* 8 bpp Red */
+#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b722..fd5aa47bd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
typedef struct drm_i915_getparam {
- int param;
+ __s32 param;
+ /*
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
int __user *value;
} drm_i915_getparam_t;
@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@ struct drm_i915_gem_context_param {
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
__u64 value;
};
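Userspace can probe the new parameters through the existing getparam ioctl. A minimal sketch, assuming fd is an already-open DRM file descriptor and the function name is illustrative:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns 1 if the resource streamer is available, 0 if not, -1 on error. */
	static int has_resource_streamer(int fd)
	{
		int value = 0;
		drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_RESOURCE_STREAMER };

		gp.value = &value;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;
		return value != 0;
	}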
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe..05b204954 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
/*************************************************************************/
/**
@@ -88,6 +89,8 @@
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_DX 12
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
struct drm_vmw_execbuf_arg {
uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
+ uint32_t context_handle;
+ uint32_t pad64;
};
/**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
enum drm_vmw_shader_type {
drm_vmw_shader_type_vs = 0,
drm_vmw_shader_type_ps,
- drm_vmw_shader_type_gs
};
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
* if none.
* @base_size Size of the base mip level for all faces.
+ * @array_size Must be zero for non-DX hardware, and if non-zero
+ * svga3d_flags must have proper bind flags setup.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
- uint32_t pad64;
+ uint32_t array_size;
struct drm_vmw_size base_size;
};
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
uint32_t pad64;
};
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+ drm_vmw_context_legacy,
+ drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+ enum drm_vmw_extended_context req;
+ struct drm_vmw_context_arg rep;
+};
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 49a6bc538..93fc001d1 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -245,6 +245,7 @@ header-y += limits.h
header-y += llc.h
header-y += loop.h
header-y += lp.h
+header-y += lwtunnel.h
header-y += magic.h
header-y += major.h
header-y += map_to_7segment.h
@@ -253,6 +254,7 @@ header-y += mdio.h
header-y += media.h
header-y += media-bus-format.h
header-y += mei.h
+header-y += membarrier.h
header-y += memfd.h
header-y += mempolicy.h
header-y += meye.h
@@ -457,3 +459,4 @@ header-y += xfrm.h
header-y += xilinx-v4l2-controls.h
header-y += zorro.h
header-y += zorro_ids.h
+header-y += userfaultfd.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1f1..843540c39 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -266,6 +266,7 @@
#define AUDIT_OBJ_UID 109
#define AUDIT_OBJ_GID 110
#define AUDIT_FIELD_COMPARE 111
+#define AUDIT_EXE 112
#define AUDIT_ARG0 200
#define AUDIT_ARG1 (AUDIT_ARG0+1)
@@ -324,8 +325,10 @@ enum {
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
+#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
- AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME)
+ AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
+ AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
@@ -382,6 +385,9 @@ enum {
#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_SPARC (EM_SPARC)
#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_TILEGX (EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_PERM_EXEC 1
diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h
index 9822d7639..0eb09ee31 100644
--- a/include/uapi/linux/aufs_type.h
+++ b/include/uapi/linux/aufs_type.h
@@ -26,7 +26,7 @@
#include <linux/limits.h>
-#define AUFS_VERSION "4.2"
+#define AUFS_VERSION "4.3"
/* todo? move this to linux-2.6.19/include/magic.h */
#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f99e..92a48e2d5 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
BPF_MAP_TYPE_PROG_ARRAY,
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};
enum bpf_prog_type {
@@ -249,6 +250,28 @@ enum bpf_func_id {
* Return: 0 on success
*/
BPF_FUNC_get_current_comm,
+
+ /**
+ * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+ * @skb: pointer to skb
+ * Return: classid if != 0
+ */
+ BPF_FUNC_get_cgroup_classid,
+ BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+ BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
+
+ /**
+ * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+ * retrieve or populate tunnel metadata
+ * @skb: pointer to skb
+ * @key: pointer to 'struct bpf_tunnel_key'
+ * @size: size of 'struct bpf_tunnel_key'
+ * @flags: room for future extensions
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_get_tunnel_key,
+ BPF_FUNC_skb_set_tunnel_key,
+ BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
__BPF_FUNC_MAX_ID,
};
@@ -269,6 +292,12 @@ struct __sk_buff {
__u32 ifindex;
__u32 tc_index;
__u32 cb[5];
+ __u32 hash;
+};
+
+struct bpf_tunnel_key {
+ __u32 tunnel_id;
+ __u32 remote_ipv4;
};
#endif /* _UAPI__LINUX_BPF_H__ */
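A rough sketch of how a classifier program could use the new tunnel-key helper, written in the restricted-C style of the kernel's BPF samples. The helper-pointer declaration mirrors samples/bpf/bpf_helpers.h and is an assumption about the build setup, not something this patch provides.

	#include <uapi/linux/bpf.h>

	/* Helper stub in the samples/bpf style. */
	static int (*bpf_skb_get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
					     int size, int flags) =
		(void *) BPF_FUNC_skb_get_tunnel_key;

	__attribute__((section("classifier"), used))
	int match_tunnel_id(struct __sk_buff *skb)
	{
		struct bpf_tunnel_key key = {};

		/* @flags is "room for future extensions"; pass 0 for now. */
		if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
			return 0;

		return key.tunnel_id == 42;	/* 42 is an arbitrary example VNI */
	}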
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783c4..df56c8ff0 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
/* Version of the device interface */
#define DLM_DEVICE_VERSION_MAJOR 6
#define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 1
+#define DLM_DEVICE_VERSION_PATCH 2
/* struct passed to the lock write */
struct dlm_lock_params {
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b08829667..b56dfcfe9 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,9 @@
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_AARCH64 183 /* ARM 64 bit */
+#define EM_TILEPRO 188 /* Tilera TILEPro */
+#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
+#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec18..cd1629170 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
* respectively. For example, if the device supports HWTSTAMP_TX_ON,
* then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
+ * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
*/
struct ethtool_ts_info {
__u32 cmd;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e30..96161b820 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@ enum {
FRA_UNUSED5,
FRA_FWMARK, /* mark */
FRA_FLOW, /* flow/class id */
- FRA_UNUSED6,
+ FRA_TUN_ID,
FRA_SUPPRESS_IFGROUP,
FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE, /* Extended table id */
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index c06742d52..ab055d8cd 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -3,6 +3,7 @@
#include <linux/if.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
struct gsm_config
{
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea6208..3635b7797 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@ struct br_mdb_entry {
#define MDB_TEMPORARY 0
#define MDB_PERMANENT 1
__u8 state;
+ __u16 vid;
struct {
union {
__be32 ip4;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index aa63ed023..ea9221b03 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -42,6 +42,7 @@
#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
+#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3d3..3a5f263cf 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@ enum {
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
+ IFLA_PROTO_DOWN,
__IFLA_MAX
};
@@ -229,6 +230,8 @@ enum {
IFLA_BR_AGEING_TIME,
IFLA_BR_STP_STATE,
IFLA_BR_PRIORITY,
+ IFLA_BR_VLAN_FILTERING,
+ IFLA_BR_VLAN_PROTOCOL,
__IFLA_BR_MAX,
};
@@ -339,6 +342,15 @@ enum macvlan_macaddr_mode {
#define MACVLAN_FLAG_NOPROMISC 1
+/* VRF section */
+enum {
+ IFLA_VRF_UNSPEC,
+ IFLA_VRF_TABLE,
+ __IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
/* IPVLAN section */
enum {
IFLA_IPVLAN_UNSPEC,
@@ -381,6 +393,7 @@ enum {
IFLA_VXLAN_REMCSUM_RX,
IFLA_VXLAN_GBP,
IFLA_VXLAN_REMCSUM_NOPARTIAL,
+ IFLA_VXLAN_COLLECT_METADATA,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -397,6 +410,8 @@ enum {
IFLA_GENEVE_REMOTE,
IFLA_GENEVE_TTL,
IFLA_GENEVE_TOS,
+ IFLA_GENEVE_PORT, /* destination port */
+ IFLA_GENEVE_COLLECT_METADATA,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -431,6 +446,7 @@ enum {
IFLA_BOND_AD_ACTOR_SYS_PRIO,
IFLA_BOND_AD_USER_PORT_KEY,
IFLA_BOND_AD_ACTOR_SYSTEM,
+ IFLA_BOND_TLB_DYNAMIC_LB,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index d3d715f8c..9e7edfd81 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,7 @@ struct sockaddr_ll {
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
#define PACKET_ROLLOVER_STATS 21
+#define PACKET_FANOUT_DATA 22
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -62,6 +63,8 @@ struct sockaddr_ll {
#define PACKET_FANOUT_ROLLOVER 3
#define PACKET_FANOUT_RND 4
#define PACKET_FANOUT_QM 5
+#define PACKET_FANOUT_CBPF 6
+#define PACKET_FANOUT_EBPF 7
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11a4..af4de90ba 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@ enum {
IFLA_GRE_ENCAP_FLAGS,
IFLA_GRE_ENCAP_SPORT,
IFLA_GRE_ENCAP_DPORT,
+ IFLA_GRE_COLLECT_METADATA,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644
index 000000000..7ed9e6708
--- /dev/null
+++ b/include/uapi/linux/ila.h
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+ ILA_ATTR_UNSPEC,
+ ILA_ATTR_LOCATOR, /* u64 */
+
+ __ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 3199243f2..391395c06 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -406,6 +406,11 @@ enum {
IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
+ IPVS_DAEMON_ATTR_SYNC_MAXLEN, /* UDP Payload Size */
+ IPVS_DAEMON_ATTR_MCAST_GROUP, /* IPv4 Multicast Address */
+ IPVS_DAEMON_ATTR_MCAST_GROUP6, /* IPv6 Multicast Address */
+ IPVS_DAEMON_ATTR_MCAST_PORT, /* Multicast Port (base) */
+ IPVS_DAEMON_ATTR_MCAST_TTL, /* Multicast TTL */
__IPVS_DAEMON_ATTR_MAX,
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54ae5..38b4fef20 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,9 @@ enum {
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
DEVCONF_STABLE_SECRET,
+ DEVCONF_USE_OIF_ADDRS_ONLY,
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index a6c4962e5..5da5f8751 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -33,6 +33,7 @@
#define KPF_THP 22
#define KPF_BALLOON 23
#define KPF_ZERO_PAGE 24
+#define KPF_IDLE 25
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 716ad4ae4..a9256f033 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -237,6 +237,7 @@ struct kvm_run {
__u32 count;
__u64 data_offset; /* relative to kvm_run start */
} io;
+ /* KVM_EXIT_DEBUG */
struct {
struct kvm_debug_exit_arch arch;
} debug;
@@ -285,6 +286,7 @@ struct kvm_run {
__u32 data;
__u8 is_write;
} dcr;
+ /* KVM_EXIT_INTERNAL_ERROR */
struct {
__u32 suberror;
/* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
@@ -295,6 +297,7 @@ struct kvm_run {
struct {
__u64 gprs[32];
} osi;
+ /* KVM_EXIT_PAPR_HCALL */
struct {
__u64 nr;
__u64 ret;
@@ -317,6 +320,7 @@ struct kvm_run {
struct {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
+#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
__u64 flags;
} system_event;
@@ -481,6 +485,7 @@ struct kvm_s390_psw {
((ai) << 26))
#define KVM_S390_INT_IO_MIN 0x00000000u
#define KVM_S390_INT_IO_MAX 0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK 0x04000000u
struct kvm_s390_interrupt {
@@ -817,6 +822,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_DISABLE_QUIRKS 116
#define KVM_CAP_X86_SMM 117
#define KVM_CAP_MULTI_ADDRESS_SPACE 118
+#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
+#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 000000000..f8b01887a
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,43 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+ LWTUNNEL_ENCAP_NONE,
+ LWTUNNEL_ENCAP_MPLS,
+ LWTUNNEL_ENCAP_IP,
+ LWTUNNEL_ENCAP_ILA,
+ LWTUNNEL_ENCAP_IP6,
+ __LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+enum lwtunnel_ip_t {
+ LWTUNNEL_IP_UNSPEC,
+ LWTUNNEL_IP_ID,
+ LWTUNNEL_IP_DST,
+ LWTUNNEL_IP_SRC,
+ LWTUNNEL_IP_TTL,
+ LWTUNNEL_IP_TOS,
+ LWTUNNEL_IP_FLAGS,
+ __LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+ LWTUNNEL_IP6_UNSPEC,
+ LWTUNNEL_IP6_ID,
+ LWTUNNEL_IP6_DST,
+ LWTUNNEL_IP6_SRC,
+ LWTUNNEL_IP6_HOPLIMIT,
+ LWTUNNEL_IP6_TC,
+ LWTUNNEL_IP6_FLAGS,
+ __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index bc0d8b69c..7c3b64f6a 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -107,4 +107,23 @@ struct mei_connect_client_data {
};
};
+/**
+ * DOC: set and unset event notification for a connected client
+ *
+ * The IOCTL argument is 1 for enabling event notification and 0 for
+ * disabling the service
+ * Return: -EOPNOTSUPP if the device doesn't support the feature
+ */
+#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
+
+/**
+ * DOC: retrieve notification
+ *
+ * The IOCTL output argument is 1 if an event is pending and 0 otherwise;
+ * the ioctl has to be called in order to acknowledge the pending event.
+ *
+ * Return: -EOPNOTSUPP if the device doesn't support the feature
+ */
+#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32)
+
#endif /* _LINUX_MEI_H */
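A hedged userspace sketch of the new notification ioctls. It assumes fd already refers to an MEI device node with a connected client (e.g. after IOCTL_MEI_CONNECT_CLIENT), and the helper names are illustrative:

	#include <sys/ioctl.h>
	#include <linux/mei.h>

	static int mei_notify_enable(int fd, int enable)
	{
		__u32 arg = enable ? 1 : 0;	/* 1 = enable, 0 = disable */

		return ioctl(fd, IOCTL_MEI_NOTIFY_SET, &arg);
	}

	static int mei_notify_pending(int fd)
	{
		__u32 pending = 0;

		if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending))
			return -1;	/* -EOPNOTSUPP if the feature is missing */
		return pending;		/* 1 if an event is pending, 0 otherwise */
	}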
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
new file mode 100644
index 000000000..e0b108bd2
--- /dev/null
+++ b/include/uapi/linux/membarrier.h
@@ -0,0 +1,53 @@
+#ifndef _UAPI_LINUX_MEMBARRIER_H
+#define _UAPI_LINUX_MEMBARRIER_H
+
+/*
+ * linux/membarrier.h
+ *
+ * membarrier system call API
+ *
+ * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * enum membarrier_cmd - membarrier system call command
+ * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns
+ * a bitmask of valid commands.
+ * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads.
+ * Upon return from system call, the caller thread
+ * is ensured that all running threads have passed
+ * through a state where all memory accesses to
+ * user-space addresses match program order between
+ * entry to and return from the system call
+ * (non-running threads are de facto in such a
+ * state). This covers threads from all processes
+ * running on the system. This command returns 0.
+ *
+ * Command to be passed to the membarrier system call. The commands need to
+ * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
+ * the value 0.
+ */
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+};
+
+#endif /* _UAPI_LINUX_MEMBARRIER_H */
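A minimal userspace sketch of calling the new system call. There is no libc wrapper at this point, so syscall(2) is used directly; this assumes the installed headers already carry __NR_membarrier from the unistd changes above.

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/membarrier.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

		if (mask < 0)
			return 1;	/* kernel without CONFIG_MEMBARRIER, or too old */
		if (mask & MEMBARRIER_CMD_SHARED)
			membarrier(MEMBARRIER_CMD_SHARED, 0);	/* system-wide barrier */
		return 0;
	}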
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd1c..24a6cb1ae 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@ struct mpls_label {
#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+#define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 000000000..d80a0498f
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
+/*
+ * mpls tunnel api
+ *
+ * Authors:
+ * Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ * [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+ MPLS_IPTUNNEL_UNSPEC,
+ MPLS_IPTUNNEL_DST,
+ __MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 2b94ea228..5b4a4be06 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -87,7 +87,7 @@ struct nd_cmd_ars_status {
__u32 handle;
__u32 flags;
__u64 err_address;
- __u64 mask;
+ __u64 length;
} __packed records[0];
} __packed;
@@ -111,6 +111,11 @@ enum {
ND_CMD_VENDOR = 9,
};
+enum {
+ ND_ARS_VOLATILE = 1,
+ ND_ARS_PERSISTENT = 2,
+};
+
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
{
static const char * const names[] = {
@@ -194,4 +199,9 @@ enum nd_driver_flags {
enum {
ND_MIN_NAMESPACE_SIZE = 0x00400000,
};
+
+enum ars_masks {
+ ARS_STATUS_MASK = 0x0000FFFF,
+ ARS_EXT_STATUS_SHIFT = 16,
+};
#endif /* __NDCTL_H__ */
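The new mask/shift pair splits the 32-bit ARS status word returned by firmware into a status half and an extended-status half. A small hedged helper showing the intended decoding (function names are illustrative):

	#include <linux/ndctl.h>

	/* Low 16 bits: command status. */
	static inline unsigned int ars_status(unsigned int raw)
	{
		return raw & ARS_STATUS_MASK;
	}

	/* High 16 bits: extended status. */
	static inline unsigned int ars_ext_status(unsigned int raw)
	{
		return raw >> ARS_EXT_STATUS_SHIFT;
	}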
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61bb..788655bfa 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@ struct ndt_stats {
__u64 ndts_rcv_probes_ucast;
__u64 ndts_periodic_gc_runs;
__u64 ndts_forced_gc_runs;
+ __u64 ndts_table_fulls;
};
enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe668..ed4e776e1 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@ enum sctp_conntrack {
SCTP_CONNTRACK_SHUTDOWN_SENT,
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED,
SCTP_CONNTRACK_MAX
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a99e6a997..d8c8a7c9d 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -756,16 +756,25 @@ enum nft_ct_attributes {
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+enum nft_limit_type {
+ NFT_LIMIT_PKTS,
+ NFT_LIMIT_PKT_BYTES
+};
+
/**
* enum nft_limit_attributes - nf_tables limit expression netlink attributes
*
* @NFTA_LIMIT_RATE: refill rate (NLA_U64)
* @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
*/
enum nft_limit_attributes {
NFTA_LIMIT_UNSPEC,
NFTA_LIMIT_RATE,
NFTA_LIMIT_UNIT,
+ NFTA_LIMIT_BURST,
+ NFTA_LIMIT_TYPE,
__NFTA_LIMIT_MAX
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
@@ -936,6 +945,20 @@ enum nft_redir_attributes {
#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
/**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ */
+enum nft_dup_attributes {
+ NFTA_DUP_UNSPEC,
+ NFTA_DUP_SREG_ADDR,
+ NFTA_DUP_SREG_DEV,
+ __NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX (__NFTA_DUP_MAX - 1)
+
+/**
* enum nft_gen_attributes - nf_tables ruleset generation attributes
*
* @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index acad6c52a..c1a4e1441 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -61,6 +61,7 @@ enum ctattr_tuple {
CTA_TUPLE_UNSPEC,
CTA_TUPLE_IP,
CTA_TUPLE_PROTO,
+ CTA_TUPLE_ZONE,
__CTA_TUPLE_MAX
};
#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97b3..f2c10dc14 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@ enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_CT.h b/include/uapi/linux/netfilter/xt_CT.h
index 5a688c1ca..9e520418b 100644
--- a/include/uapi/linux/netfilter/xt_CT.h
+++ b/include/uapi/linux/netfilter/xt_CT.h
@@ -6,7 +6,13 @@
enum {
XT_CT_NOTRACK = 1 << 0,
XT_CT_NOTRACK_ALIAS = 1 << 1,
- XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+ XT_CT_ZONE_DIR_ORIG = 1 << 2,
+ XT_CT_ZONE_DIR_REPL = 1 << 3,
+ XT_CT_ZONE_MARK = 1 << 4,
+
+ XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+ XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+ XT_CT_ZONE_MARK,
};
struct xt_ct_target_info {
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
index 205ed62e4..cd2e940c8 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -10,7 +10,9 @@ enum ip6t_reject_with {
IP6T_ICMP6_ADDR_UNREACH,
IP6T_ICMP6_PORT_UNREACH,
IP6T_ICMP6_ECHOREPLY,
- IP6T_TCP_RESET
+ IP6T_TCP_RESET,
+ IP6T_ICMP6_POLICY_FAIL,
+ IP6T_ICMP6_REJECT_ROUTE
};
struct ip6t_reject_info {
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 20d68edd5..8f6e0e1b3 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -112,6 +112,7 @@ struct nlmsgerr {
#define NETLINK_TX_RING 7
#define NETLINK_LISTEN_ALL_NSID 8
#define NETLINK_LIST_MEMBERSHIPS 9
+#define NETLINK_CAP_ACK 10
struct nl_pktinfo {
__u32 group;
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2119c7c27..2b871e085 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -15,7 +15,7 @@
#include <linux/types.h>
-#define NFS4_BITMAP_SIZE 2
+#define NFS4_BITMAP_SIZE 3
#define NFS4_VERIFIER_SIZE 8
#define NFS4_STATEID_SEQID_SIZE 4
#define NFS4_STATEID_OTHER_SIZE 12
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index 9bb9771a1..552726631 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -22,6 +22,7 @@
#define NFS_ACLCNT 0x0002
#define NFS_DFACL 0x0004
#define NFS_DFACLCNT 0x0008
+#define NFS_ACL_MASK 0x000f
/* Flag for Default ACL entries */
#define NFS_ACL_DEFAULT 0x1000
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 732b32e92..8864194a4 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -584,5 +584,6 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
#define NVME_IOCTL_RESET _IO('N', 0x44)
+#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
#endif /* _UAPI_LINUX_NVME_H */
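A minimal sketch of driving the new subsystem-reset ioctl from userspace; the device path is illustrative and the ioctl takes no argument:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

/* Issue a subsystem reset through the controller character device. */
static int nvme_subsys_reset(const char *dev)	/* e.g. "/dev/nvme0" */
{
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, NVME_IOCTL_SUBSYS_RESET);
	close(fd);
	return ret;
}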
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab77601..e663627a8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -164,6 +164,9 @@ enum ovs_packet_cmd {
* %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
* output port is actually a tunnel port. Contains the output tunnel key
* extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and
+ * %OVS_PACKET_ATTR_USERSPACE action; specifies the maximum received
+ * fragment size.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -180,6 +183,7 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_UNUSED2,
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
+ OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
__OVS_PACKET_ATTR_MAX
};
@@ -319,9 +323,13 @@ enum ovs_key_attr {
OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
* The implementation may restrict
* the accepted length of the array. */
+ OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */
+ OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */
+ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */
+ OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
#ifdef __KERNEL__
- OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
#endif
__OVS_KEY_ATTR_MAX
};
@@ -431,6 +439,20 @@ struct ovs_key_nd {
__u8 nd_tll[ETH_ALEN];
};
+#define OVS_CT_LABELS_LEN 16
+struct ovs_key_ct_labels {
+ __u8 ct_labels[OVS_CT_LABELS_LEN];
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW 0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED 0x04 /* Related to an established
+ * connection. */
+#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */
+
/**
* enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
* @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -595,6 +617,34 @@ struct ovs_action_hash {
};
/**
+ * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
+ * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
+ * table. This allows future packets for the same connection to be identified
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
+ * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
+ * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
+ * mask, the corresponding bit in the value is copied to the connection
+ * tracking mark field in the connection.
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
+ * mask. For each bit set in the mask, the corresponding bit in the value is
+ * copied to the connection tracking label field in the connection.
+ * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+ */
+enum ovs_ct_attr {
+ OVS_CT_ATTR_UNSPEC,
+ OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */
+ OVS_CT_ATTR_ZONE, /* u16 zone id. */
+ OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
+ OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */
+ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
+ related connections. */
+ __OVS_CT_ATTR_MAX
+};
+
+#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
+
+/**
* enum ovs_action_attr - Action types.
*
* @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
@@ -623,6 +673,8 @@ struct ovs_action_hash {
* indicate the new packet contents. This could potentially still be
* %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
* is no MPLS label stack, as determined by ethertype, no action is taken.
+ * @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
+ * entries in the flow key.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -648,6 +700,7 @@ enum ovs_action_attr {
* data immediately followed by a mask.
* The data must be zero for the unmasked
* bits. */
+ OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
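The OVS_CS_F_* bits above describe the conntrack state carried in OVS_KEY_ATTR_CT_STATE once an OVS_ACTION_ATTR_CT action has run. A small userspace-side sketch of interpreting that bitmask (the helper name is illustrative):

#include <stdbool.h>
#include <linux/openvswitch.h>

/* True when conntrack saw the packet and matched it to an existing
 * connection in the reply direction. */
static bool ovs_ct_established_reply(__u32 ct_state)
{
	return (ct_state & OVS_CS_F_TRACKED) &&
	       (ct_state & OVS_CS_F_ESTABLISHED) &&
	       (ct_state & OVS_CS_F_REPLY_DIR);
}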
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d97f84c08..2881145cd 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -330,7 +330,8 @@ struct perf_event_attr {
mmap2 : 1, /* include mmap with inode data */
comm_exec : 1, /* flag comm events that are due to an exec */
use_clockid : 1, /* use @clockid for time fields */
- __reserved_1 : 38;
+ context_switch : 1, /* context switch data */
+ __reserved_1 : 37;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -572,9 +573,11 @@ struct perf_event_mmap_page {
/*
* PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
* different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
+#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/*
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
@@ -818,6 +821,32 @@ enum perf_event_type {
*/
PERF_RECORD_LOST_SAMPLES = 13,
+ /*
+ * Records a context switch in or out (flagged by
+ * PERF_RECORD_MISC_SWITCH_OUT). See also
+ * PERF_RECORD_SWITCH_CPU_WIDE.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH = 14,
+
+ /*
+ * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+ * next_prev_tid that are the next (switching out) or previous
+ * (switching in) pid/tid.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 next_prev_pid;
+ * u32 next_prev_tid;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -922,6 +951,7 @@ union perf_mem_data_src {
*
* in_tx: running in a hardware transaction
* abort: aborting a hardware transaction
+ * cycles: cycles from last branch (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -930,7 +960,8 @@ struct perf_branch_entry {
predicted:1,/* target predicted */
in_tx:1, /* in transaction */
abort:1, /* transaction abort */
- reserved:60;
+ cycles:16, /* cycle count to last branch */
+ reserved:44;
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
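With the new context_switch attribute bit set, the kernel emits PERF_RECORD_SWITCH side-band records, or PERF_RECORD_SWITCH_CPU_WIDE when profiling a CPU rather than a single task. A minimal sketch using the raw perf_event_open() syscall on a software dummy event; error handling is omitted:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a dummy event that emits context-switch side-band records.
 * pid/cpu follow the usual perf_event_open() conventions: pid == -1
 * with cpu >= 0 profiles a whole CPU and yields the _CPU_WIDE record. */
static int open_switch_events(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.sample_id_all = 1;		/* append sample_id to the records */
	attr.context_switch = 1;	/* PERF_RECORD_SWITCH{,_CPU_WIDE} */
	attr.exclude_kernel = 1;

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0UL);
}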
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 31891d953..a8d0759a9 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -190,4 +190,11 @@ struct prctl_mm_map {
# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT 47
+# define PR_CAP_AMBIENT_IS_SET 1
+# define PR_CAP_AMBIENT_RAISE 2
+# define PR_CAP_AMBIENT_LOWER 3
+# define PR_CAP_AMBIENT_CLEAR_ALL 4
+
#endif /* _LINUX_PRCTL_H */
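A minimal sketch of the new ambient-capability prctl() calls; it assumes the capability is already present in the caller's permitted and inheritable sets, which the kernel requires for a raise to succeed:

#include <sys/prctl.h>
#include <linux/capability.h>
#include <linux/prctl.h>

/* Make CAP_NET_BIND_SERVICE survive execve() of an unprivileged helper. */
static int raise_ambient_net_bind(void)
{
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET,
		  CAP_NET_BIND_SERVICE, 0, 0) == 1)
		return 0;	/* already in the ambient set */
	return prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		     CAP_NET_BIND_SERVICE, 0, 0);
}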
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index cf1019e15..a7a697986 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -89,9 +89,11 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
/* eventless options */
-#define PTRACE_O_EXITKILL (1 << 20)
+#define PTRACE_O_EXITKILL (1 << 20)
+#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
-#define PTRACE_O_MASK (0x000000ff | PTRACE_O_EXITKILL)
+#define PTRACE_O_MASK (\
+ 0x000000ff | PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP)
#include <asm/ptrace.h>
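PTRACE_O_SUSPEND_SECCOMP lets a sufficiently privileged tracer (checkpoint/restore style tooling) suspend the tracee's seccomp filters while it is being manipulated. A minimal sketch; the fallback defines mirror the values above in case the installed headers are older:

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_O_EXITKILL
#define PTRACE_O_EXITKILL		(1 << 20)
#endif
#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP	(1 << 21)
#endif

/* Suspend the (stopped) tracee's seccomp filters for the duration of
 * the restore; requires CAP_SYS_ADMIN and an unconfined tracer. */
static long suspend_tracee_seccomp(pid_t pid)
{
	unsigned long opts = PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP;

	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts);
}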
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07f1..9d8f5d10c 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -160,7 +160,7 @@ struct rtattr {
/* Macros to handle rtattributes */
-#define RTA_ALIGNTO 4
+#define RTA_ALIGNTO 4U
#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
(rta)->rta_len >= sizeof(struct rtattr) && \
@@ -308,6 +308,8 @@ enum rtattr_type_t {
RTA_VIA,
RTA_NEWDST,
RTA_PREF,
+ RTA_ENCAP_TYPE,
+ RTA_ENCAP,
__RTA_MAX
};
@@ -416,10 +418,13 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
-#define RTAX_FEATURE_ECN 0x00000001
-#define RTAX_FEATURE_SACK 0x00000002
-#define RTAX_FEATURE_TIMESTAMP 0x00000004
-#define RTAX_FEATURE_ALLFRAG 0x00000008
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1)
+#define RTAX_FEATURE_TIMESTAMP (1 << 2)
+#define RTAX_FEATURE_ALLFRAG (1 << 3)
+
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
struct rta_session {
__u8 proto;
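RTA_ENCAP_TYPE and RTA_ENCAP added above carry a route's lightweight-tunnel encapsulation. A small sketch of walking a received attribute chain with the RTA_OK()/RTA_NEXT()/RTA_DATA() helpers defined alongside RTA_ALIGN() in this header; the u16 payload width assumed for RTA_ENCAP_TYPE is illustrative:

#include <linux/rtnetlink.h>

/* Walk a route message's attributes and pick out the new encap data. */
static void scan_encap_attrs(struct rtattr *rta, int len)
{
	unsigned short encap_type = 0;
	struct rtattr *encap = NULL;

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == RTA_ENCAP_TYPE)
			encap_type = *(unsigned short *)RTA_DATA(rta);
		else if (rta->rta_type == RTA_ENCAP)
			encap = rta;	/* nested; layout depends on encap_type */
	}
	(void)encap_type;
	(void)encap;
}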
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index cc89ddefa..f63e1cd82 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -37,9 +37,16 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
-/* SCHED_ISO: reserved but not implemented yet */
+/* SCHED_ISO: Implemented on BFS only */
#define SCHED_IDLE 5
+#ifdef CONFIG_SCHED_BFS
+#define SCHED_ISO 4
+#define SCHED_IDLEPRIO SCHED_IDLE
+#define SCHED_MAX (SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
+#else /* CONFIG_SCHED_BFS */
#define SCHED_DEADLINE 6
+#endif /* CONFIG_SCHED_BFS */
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index 985aac9e6..35ac35cef 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -43,9 +43,18 @@
#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
+/* When set, a process cannot add new capabilities to its ambient set. */
+#define SECURE_NO_CAP_AMBIENT_RAISE 6
+#define SECURE_NO_CAP_AMBIENT_RAISE_LOCKED 7 /* make bit-6 immutable */
+
+#define SECBIT_NO_CAP_AMBIENT_RAISE (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
+#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
+ (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
+
#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
issecure_mask(SECURE_NO_SETUID_FIXUP) | \
- issecure_mask(SECURE_KEEP_CAPS))
+ issecure_mask(SECURE_KEEP_CAPS) | \
+ issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
#endif /* _UAPI_LINUX_SECUREBITS_H */
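A minimal sketch of locking out future ambient raises with the new securebits; PR_SET_SECUREBITS requires CAP_SETPCAP, and the existing bits are read back first so they are preserved:

#include <sys/prctl.h>
#include <linux/securebits.h>

/* Forbid (and lock the prohibition of) future ambient raises. */
static int forbid_ambient_raise(void)
{
	int bits = prctl(PR_GET_SECUREBITS);

	if (bits < 0)
		return bits;
	return prctl(PR_SET_SECUREBITS,
		     bits | SECBIT_NO_CAP_AMBIENT_RAISE |
			    SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED);
}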
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee896840..25a9ad8bc 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b67f99d3c..95c6521d8 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -42,10 +42,6 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
-/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
-#define xstr(s) str(s)
-#define str(s) #s
-
struct tcmu_mailbox {
__u16 version;
__u16 flags;
diff --git a/include/uapi/linux/toshiba.h b/include/uapi/linux/toshiba.h
index e9bef5b2f..c58bf4b5b 100644
--- a/include/uapi/linux/toshiba.h
+++ b/include/uapi/linux/toshiba.h
@@ -1,6 +1,7 @@
/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops
*
* Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ * Copyright (c) 2015 Azael Avalos <coproscefalo@gmail.com>
*
* Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers
* on making sure the structure is aligned and packed.
@@ -20,9 +21,18 @@
#ifndef _UAPI_LINUX_TOSHIBA_H
#define _UAPI_LINUX_TOSHIBA_H
-#define TOSH_PROC "/proc/toshiba"
-#define TOSH_DEVICE "/dev/toshiba"
-#define TOSH_SMM _IOWR('t', 0x90, int) /* broken: meant 24 bytes */
+/*
+ * Toshiba modules paths
+ */
+
+#define TOSH_PROC "/proc/toshiba"
+#define TOSH_DEVICE "/dev/toshiba"
+#define TOSHIBA_ACPI_PROC "/proc/acpi/toshiba"
+#define TOSHIBA_ACPI_DEVICE "/dev/toshiba_acpi"
+
+/*
+ * Toshiba SMM structure
+ */
typedef struct {
unsigned int eax;
@@ -33,5 +43,21 @@ typedef struct {
unsigned int edi __attribute__ ((packed));
} SMMRegisters;
+/*
+ * IOCTLs (0x90 - 0x91)
+ */
+
+#define TOSH_SMM _IOWR('t', 0x90, SMMRegisters)
+/*
+ * Convenience toshiba_acpi command.
+ *
+ * The System Configuration Interface (SCI) is opened/closed internally
+ * to protect userspace from buggy BIOSes.
+ *
+ * The toshiba_acpi module checks whether the eax register is set with
+ * SCI_GET (0xf300) or SCI_SET (0xf400), returning -EINVAL if not.
+ */
+#define TOSHIBA_ACPI_SCI _IOWR('t', 0x91, SMMRegisters)
+
#endif /* _UAPI_LINUX_TOSHIBA_H */
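A minimal sketch of the new TOSHIBA_ACPI_SCI ioctl against /dev/toshiba_acpi. Only the eax check (SCI_GET/SCI_SET) is documented above; the remaining register usage is function-specific, so the ebx/ecx roles below are illustrative placeholders:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/toshiba.h>

/* Run an SCI "get" call; ebx/ecx usage below is illustrative only. */
static int toshiba_sci_get(unsigned int function, unsigned int *out)
{
	SMMRegisters regs;
	int ret, fd = open(TOSHIBA_ACPI_DEVICE, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&regs, 0, sizeof(regs));
	regs.eax = 0xf300;	/* SCI_GET; SCI_SET would be 0xf400 */
	regs.ebx = function;	/* hypothetical function code */
	ret = ioctl(fd, TOSHIBA_ACPI_SCI, &regs);
	if (ret == 0)
		*out = regs.ecx;	/* result register, illustrative */
	close(fd);
	return ret;
}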
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index aa33fd1b2..f7adc6e01 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -674,9 +674,21 @@ struct usb_otg_descriptor {
__u8 bmAttributes; /* support for HNP, SRP, etc */
} __attribute__ ((packed));
+/* USB_DT_OTG (from OTG 2.0 supplement) */
+struct usb_otg20_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+
+ __u8 bmAttributes; /* support for HNP, SRP and ADP, etc */
+ __le16 bcdOTG; /* OTG and EH supplement release number
+ * in binary-coded decimal(i.e. 2.0 is 0200H)
+ */
+} __attribute__ ((packed));
+
/* from usb_otg_descriptor.bmAttributes */
#define USB_OTG_SRP (1 << 0)
#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
+#define USB_OTG_ADP (1 << 2) /* support ADP */
/*-------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
new file mode 100644
index 000000000..9057d7af3
--- /dev/null
+++ b/include/uapi/linux/userfaultfd.h
@@ -0,0 +1,167 @@
+/*
+ * include/linux/userfaultfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_H
+#define _LINUX_USERFAULTFD_H
+
+#include <linux/types.h>
+
+#define UFFD_API ((__u64)0xAA)
+/*
+ * After implementing the respective features it will become:
+ * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ * UFFD_FEATURE_EVENT_FORK)
+ */
+#define UFFD_API_FEATURES (0)
+#define UFFD_API_IOCTLS \
+ ((__u64)1 << _UFFDIO_REGISTER | \
+ (__u64)1 << _UFFDIO_UNREGISTER | \
+ (__u64)1 << _UFFDIO_API)
+#define UFFD_API_RANGE_IOCTLS \
+ ((__u64)1 << _UFFDIO_WAKE | \
+ (__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_ZEROPAGE)
+
+/*
+ * Valid ioctl command number range with this API is from 0x00 to
+ * 0x3F. UFFDIO_API is the fixed number, everything else can be
+ * changed by implementing a different UFFD_API. If sticking to the
+ * same UFFD_API, more ioctls can be added and userland will be aware of
+ * which ioctls the running kernel implements through the ioctl command
+ * bitmask written by the UFFDIO_API.
+ */
+#define _UFFDIO_REGISTER (0x00)
+#define _UFFDIO_UNREGISTER (0x01)
+#define _UFFDIO_WAKE (0x02)
+#define _UFFDIO_COPY (0x03)
+#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_API (0x3F)
+
+/* userfaultfd ioctl ids */
+#define UFFDIO 0xAA
+#define UFFDIO_API _IOWR(UFFDIO, _UFFDIO_API, \
+ struct uffdio_api)
+#define UFFDIO_REGISTER _IOWR(UFFDIO, _UFFDIO_REGISTER, \
+ struct uffdio_register)
+#define UFFDIO_UNREGISTER _IOR(UFFDIO, _UFFDIO_UNREGISTER, \
+ struct uffdio_range)
+#define UFFDIO_WAKE _IOR(UFFDIO, _UFFDIO_WAKE, \
+ struct uffdio_range)
+#define UFFDIO_COPY _IOWR(UFFDIO, _UFFDIO_COPY, \
+ struct uffdio_copy)
+#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
+ struct uffdio_zeropage)
+
+/* read() structure */
+struct uffd_msg {
+ __u8 event;
+
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 reserved3;
+
+ union {
+ struct {
+ __u64 flags;
+ __u64 address;
+ } pagefault;
+
+ struct {
+ /* unused reserved fields */
+ __u64 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ } reserved;
+ } arg;
+} __packed;
+
+/*
+ * Start at 0x12 and not at 0 to be more strict against bugs.
+ */
+#define UFFD_EVENT_PAGEFAULT 0x12
+#if 0 /* not available yet */
+#define UFFD_EVENT_FORK 0x13
+#endif
+
+/* flags for UFFD_EVENT_PAGEFAULT */
+#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
+#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
+
+struct uffdio_api {
+ /* userland asks for an API number and the features to enable */
+ __u64 api;
+ /*
+ * Kernel answers below with the all available features for
+ * the API, this notifies userland of which events and/or
+ * which flags for each event are enabled in the current
+ * kernel.
+ *
+ * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE
+ * are to be considered implicitly always enabled in all kernels as
+ * long as the uffdio_api.api requested matches UFFD_API.
+ */
+#if 0 /* not available yet */
+#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
+#define UFFD_FEATURE_EVENT_FORK (1<<1)
+#endif
+ __u64 features;
+
+ __u64 ioctls;
+};
+
+struct uffdio_range {
+ __u64 start;
+ __u64 len;
+};
+
+struct uffdio_register {
+ struct uffdio_range range;
+#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
+#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
+ __u64 mode;
+
+ /*
+ * kernel answers which ioctl commands are available for the
+ * range, keep at the end as the last 8 bytes aren't read.
+ */
+ __u64 ioctls;
+};
+
+struct uffdio_copy {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * There will be a wrprotection flag later that allows to map
+ * pages wrprotected on the fly. And such a flag will be
+ * available if the wrprotection ioctl are implemented for the
+ * range according to the uffdio_register.ioctls.
+ */
+#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * "copy" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 copy;
+};
+
+struct uffdio_zeropage {
+ struct uffdio_range range;
+#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * "zeropage" is written by the ioctl and must be at the end:
+ * the copy_from_user will not read the last 8 bytes.
+ */
+ __s64 zeropage;
+};
+
+#endif /* _LINUX_USERFAULTFD_H */
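A condensed sketch of the userfaultfd lifecycle this header defines: create the descriptor, negotiate UFFD_API, register a missing-page range, then resolve faults read from the descriptor with UFFDIO_COPY. __NR_userfaultfd must be provided by the architecture's unistd.h, and the fault loop would normally run in its own thread; error handling is trimmed:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

/* Register a range for missing-page handling and resolve each fault by
 * copying in a prepared page.  The loop blocks in read(). */
static int watch_region(void *area, unsigned long len,
			unsigned long page_size, void *src_page)
{
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	struct uffdio_register reg;
	struct uffd_msg msg;
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
		return -1;

	reg.range.start = (unsigned long)area;
	reg.range.len = len;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
		return -1;

	while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
		struct uffdio_copy copy;

		if (msg.event != UFFD_EVENT_PAGEFAULT)
			continue;
		copy.dst = msg.arg.pagefault.address & ~((__u64)page_size - 1);
		copy.src = (unsigned long)src_page;
		copy.len = page_size;
		copy.mode = 0;	/* wake the faulting thread when done */
		if (ioctl(uffd, UFFDIO_COPY, &copy) < 0)
			break;
	}
	return 0;
}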
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 9f6e108ff..d448c536b 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -174,6 +174,10 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
+/* The base for the tc358743 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vsp1.h b/include/uapi/linux/vsp1.h
index e18858f6e..9a823696d 100644
--- a/include/uapi/linux/vsp1.h
+++ b/include/uapi/linux/vsp1.h
@@ -28,7 +28,7 @@
_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config)
struct vsp1_lut_config {
- u32 lut[256];
+ __u32 lut[256];
};
#endif /* __VSP1_USER_H__ */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca15f..1e889aa8a 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@ struct cxl_ioctl_start_work {
#define CXL_START_WORK_AMR 0x0000000000000001ULL
#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
+#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
- CXL_START_WORK_NUM_IRQS)
+ CXL_START_WORK_NUM_IRQS |\
+ CXL_START_WORK_ERR_FF)
/* Possible modes that an afu can be in */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 687ae3322..231901b08 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -5,3 +5,4 @@ header-y += ib_user_sa.h
header-y += ib_user_verbs.h
header-y += rdma_netlink.h
header-y += rdma_user_cm.h
+header-y += hfi/
diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild
new file mode 100644
index 000000000..ef23c294f
--- /dev/null
+++ b/include/uapi/rdma/hfi/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += hfi1_user.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
new file mode 100644
index 000000000..78c442fbf
--- /dev/null
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -0,0 +1,427 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+#ifndef _LINUX__HFI1_USER_H
+#define _LINUX__HFI1_USER_H
+
+#include <linux/types.h>
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of hfi1_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures change in an incompatible
+ * way. The driver must be the same for initialization to succeed.
+ */
+#define HFI1_USER_SWMAJOR 4
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define HFI1_USER_SWMINOR 0
+
+/*
+ * Set of HW and driver capability/feature bits.
+ * These bit values are used to configure enabled/disabled HW and
+ * driver features. The same set of bits are communicated to user
+ * space.
+ */
+#define HFI1_CAP_DMA_RTAIL (1UL << 0) /* Use DMA'ed RTail value */
+#define HFI1_CAP_SDMA (1UL << 1) /* Enable SDMA support */
+#define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */
+#define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */
+#define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */
+/* 1UL << 5 reserved */
+#define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. use CSR */
+#define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/
+#define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */
+#define HFI1_CAP_NODROP_EGR_FULL (1UL << 9) /* Don't drop on EGR buffs full */
+#define HFI1_CAP_TID_UNMAP (1UL << 10) /* Enable Expected TID caching */
+#define HFI1_CAP_PRINT_UNIMPL (1UL << 11) /* Show for unimplemented feats */
+#define HFI1_CAP_ALLOW_PERM_JKEY (1UL << 12) /* Allow use of permissive JKEY */
+#define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */
+#define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */
+#define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */
+#define HFI1_CAP_QSFP_ENABLED (1UL << 16) /* Enable QSFP check during LNI */
+#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
+#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
+
+#define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0)
+#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
+#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
+
+/*
+ * If the unit is specified via open, HFI choice is fixed. If port is
+ * specified, it's also fixed. Otherwise we try to spread contexts
+ * across ports and HFIs, using different algorithms. WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
+ * ports; this is the default */
+#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
+ * active ports within), then next HFI */
+#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
+
+
+/* User commands. */
+#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
+#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
+#define HFI1_CMD_USER_INFO 3 /* set up userspace */
+#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
+#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
+#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
+#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
+
+#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
+#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
+#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */
+#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
+#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
+/* separate EPROM commands from normal PSM commands */
+#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
+#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
+#define HFI1_CMD_EP_ERASE_P0 66 /* erase EPROM partition 0 */
+#define HFI1_CMD_EP_ERASE_P1 67 /* erase EPROM partition 1 */
+#define HFI1_CMD_EP_READ_P0 68 /* read EPROM partition 0 */
+#define HFI1_CMD_EP_READ_P1 69 /* read EPROM partition 1 */
+#define HFI1_CMD_EP_WRITE_P0 70 /* write EPROM partition 0 */
+#define HFI1_CMD_EP_WRITE_P1 71 /* write EPROM partition 1 */
+
+#define _HFI1_EVENT_FROZEN_BIT 0
+#define _HFI1_EVENT_LINKDOWN_BIT 1
+#define _HFI1_EVENT_LID_CHANGE_BIT 2
+#define _HFI1_EVENT_LMC_CHANGE_BIT 3
+#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4
+#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT
+
+#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT)
+#define HFI1_EVENT_LINKDOWN_BIT (1UL << _HFI1_EVENT_LINKDOWN_BIT)
+#define HFI1_EVENT_LID_CHANGE_BIT (1UL << _HFI1_EVENT_LID_CHANGE_BIT)
+#define HFI1_EVENT_LMC_CHANGE_BIT (1UL << _HFI1_EVENT_LMC_CHANGE_BIT)
+#define HFI1_EVENT_SL2VL_CHANGE_BIT (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT)
+
+/*
+ * These are the status bits readable (in ASCII form, 64bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define HFI1_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initialized */
+#define HFI1_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define HFI1_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define HFI1_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define HFI1_STATUS_HWERROR 0x200
+
+/*
+ * Number of supported shared contexts.
+ * This is the maximum number of software contexts that can share
+ * a hardware send/receive context.
+ */
+#define HFI1_MAX_SHARED_CTXTS 8
+
+/*
+ * Poll types
+ */
+#define HFI1_POLL_TYPE_ANYRCV 0x0
+#define HFI1_POLL_TYPE_URGENT 0x1
+
+/*
+ * This structure is passed to the driver to tell it where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended, if userversion is changed, so user code can tell if needed.
+ */
+struct hfi1_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to HFI1_USER_SWVERSION.
+ */
+ __u32 userversion;
+ __u16 pad;
+ /* HFI selection algorithm, if unit has not selected */
+ __u16 hfi1_alg;
+ /*
+ * If two or more processes wish to share a context, each process
+ * must set the subcontext_cnt and subcontext_id to the same
+ * values. The only restriction on the subcontext_id is that
+ * it be unique for a given node.
+ */
+ __u16 subctxt_cnt;
+ __u16 subctxt_id;
+ /* 128bit UUID passed in by PSM. */
+ __u8 uuid[16];
+};
+
+struct hfi1_ctxt_info {
+ __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __u32 rcvegr_size; /* size of each eager buffer */
+ __u16 num_active; /* number of active units */
+ __u16 unit; /* unit (chip) assigned to caller */
+ __u16 ctxt; /* ctxt on unit assigned to caller */
+ __u16 subctxt; /* subctxt on unit assigned to caller */
+ __u16 rcvtids; /* number of Rcv TIDs for this context */
+ __u16 credits; /* number of PIO credits for this context */
+ __u16 numa_node; /* NUMA node of the assigned device */
+ __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */
+ __u16 send_ctxt; /* send context in use by this user context */
+ __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */
+ __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */
+ __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */
+ __u16 sdma_ring_size; /* number of entries in SDMA request ring */
+};
+
+struct hfi1_tid_info {
+ /* virtual address of first page in transfer */
+ __u64 vaddr;
+ /* pointer to tid array. this array is big enough */
+ __u64 tidlist;
+ /* number of tids programmed by this request */
+ __u32 tidcnt;
+ /* length of transfer buffer programmed by this request */
+ __u32 length;
+ /*
+ * pointer to bitmap of TIDs used for this call;
+ * checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct hfi1_cmd {
+ __u32 type; /* command type */
+ __u32 len; /* length of struct pointed to by add */
+ __u64 addr; /* pointer to user structure */
+};
+
+enum hfi1_sdma_comp_state {
+ FREE = 0,
+ QUEUED,
+ COMPLETE,
+ ERROR
+};
+
+/*
+ * SDMA completion ring entry
+ */
+struct hfi1_sdma_comp_entry {
+ __u32 status;
+ __u32 errcode;
+};
+
+/*
+ * Device status and notifications from driver to user-space.
+ */
+struct hfi1_status {
+ __u64 dev; /* device/hw status bits */
+ __u64 port; /* port state and status bits */
+ char freezemsg[0];
+};
+
+/*
+ * This structure is returned by the driver immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets
+ */
+struct hfi1_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 hw_version;
+ /* version of software, for feature checking. */
+ __u32 sw_version;
+ /* Job key */
+ __u16 jkey;
+ __u16 padding1;
+ /*
+ * The special QP (queue pair) value that identifies PSM
+ * protocol packet from standard IB packets.
+ */
+ __u32 bthqp;
+ /* PIO credit return address, */
+ __u64 sc_credits_addr;
+ /*
+ * Base address of write-only pio buffers for this process.
+ * Each buffer has sendpio_credits*64 bytes.
+ */
+ __u64 pio_bufbase_sop;
+ /*
+ * Base address of write-only pio buffers for this process.
+ * Each buffer has sendpio_credits*64 bytes.
+ */
+ __u64 pio_bufbase;
+ /* address where receive buffer queue is mapped into */
+ __u64 rcvhdr_bufbase;
+ /* base address of Eager receive buffers. */
+ __u64 rcvegr_bufbase;
+ /* base address of SDMA completion ring */
+ __u64 sdma_comp_bufbase;
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications. Always maps real chip register space.
+ * The register addresses are:
+ * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
+ * ur_rcvtidflow
+ */
+ __u64 user_regbase;
+ /* notification events */
+ __u64 events_bufbase;
+ /* status page */
+ __u64 status_bufbase;
+ /* rcvhdrtail update */
+ __u64 rcvhdrtail_base;
+ /*
+ * shared memory pages for subctxts if ctxt is shared; these cover
+ * all the processes in the group sharing a single context.
+ * all have enough space for the num_subcontexts value on this job.
+ */
+ __u64 subctxt_uregbase;
+ __u64 subctxt_rcvegrbuf;
+ __u64 subctxt_rcvhdrbuf;
+};
+
+enum sdma_req_opcode {
+ EXPECTED = 0,
+ EAGER
+};
+
+#define HFI1_SDMA_REQ_VERSION_MASK 0xF
+#define HFI1_SDMA_REQ_VERSION_SHIFT 0x0
+#define HFI1_SDMA_REQ_OPCODE_MASK 0xF
+#define HFI1_SDMA_REQ_OPCODE_SHIFT 0x4
+#define HFI1_SDMA_REQ_IOVCNT_MASK 0xFF
+#define HFI1_SDMA_REQ_IOVCNT_SHIFT 0x8
+
+struct sdma_req_info {
+ /*
+ * bits 0-3 - version (currently unused)
+ * bits 4-7 - opcode (enum sdma_req_opcode)
+ * bits 8-15 - io vector count
+ */
+ __u16 ctrl;
+ /*
+ * Number of fragments contained in this request.
+ * User-space has already computed how many
+ * fragment-sized packets the user buffer will be
+ * split into.
+ */
+ __u16 npkts;
+ /*
+ * Size of each fragment the user buffer will be
+ * split into.
+ */
+ __u16 fragsize;
+ /*
+ * Index of the slot in the SDMA completion ring
+ * this request should be using. User-space is
+ * in charge of managing its own ring.
+ */
+ __u16 comp_idx;
+} __packed;
+
+/*
+ * SW KDETH header.
+ * swdata is SW defined portion.
+ */
+struct hfi1_kdeth_header {
+ __le32 ver_tid_offset;
+ __le16 jkey;
+ __le16 hcrc;
+ __le32 swdata[7];
+} __packed;
+
+/*
+ * Structure describing the headers that User space uses. The
+ * structure above is a subset of this one.
+ */
+struct hfi1_pkt_header {
+ __le16 pbc[4];
+ __be16 lrh[4];
+ __be32 bth[3];
+ struct hfi1_kdeth_header kdeth;
+} __packed;
+
+
+/*
+ * The list of usermode accessible registers.
+ */
+enum hfi1_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* (RO) Receive Eager Offset Tail */
+ ur_rcvegroffsettail = 4,
+ /* For internal use only; max register number. */
+ ur_maxreg,
+ /* (RW) Receive TID flow table */
+ ur_rcvtidflowtable = 256
+};
+
+#endif /* _LINUX__HFI1_USER_H */
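A small sketch of packing the ctrl word of struct sdma_req_info from the masks and shifts above (version in bits 0-3, opcode in bits 4-7, iovec count in bits 8-15); the include path follows the Kbuild export earlier in this series:

#include <rdma/hfi/hfi1_user.h>

/* Pack version (0, currently unused), opcode and iovec count. */
static __u16 hfi1_sdma_ctrl(enum sdma_req_opcode op, unsigned int iovcnt)
{
	__u16 ctrl = 0;		/* version field, bits 0-3 */

	ctrl |= ((__u16)op & HFI1_SDMA_REQ_OPCODE_MASK) <<
		HFI1_SDMA_REQ_OPCODE_SHIFT;
	ctrl |= ((__u16)iovcnt & HFI1_SDMA_REQ_IOVCNT_MASK) <<
		HFI1_SDMA_REQ_IOVCNT_SHIFT;
	return ctrl;
}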
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e4bb4270..c19a5dc15 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -7,12 +7,14 @@ enum {
RDMA_NL_RDMA_CM = 1,
RDMA_NL_NES,
RDMA_NL_C4IW,
+ RDMA_NL_LS, /* RDMA Local Services */
RDMA_NL_NUM_CLIENTS
};
enum {
RDMA_NL_GROUP_CM = 1,
RDMA_NL_GROUP_IWPM,
+ RDMA_NL_GROUP_LS,
RDMA_NL_NUM_GROUPS
};
@@ -128,5 +130,85 @@ enum {
IWPM_NLA_ERR_MAX
};
+/*
+ * Local service operations:
+ * RESOLVE - The client requests the local service to resolve a path.
+ * SET_TIMEOUT - The local service requests the client to set the timeout.
+ */
+enum {
+ RDMA_NL_LS_OP_RESOLVE = 0,
+ RDMA_NL_LS_OP_SET_TIMEOUT,
+ RDMA_NL_LS_NUM_OPS
+};
+
+/* Local service netlink message flags */
+#define RDMA_NL_LS_F_ERR 0x0100 /* Failed response */
+
+/*
+ * Local service resolve operation family header.
+ * The layout for the resolve operation:
+ * nlmsg header
+ * family header
+ * attributes
+ */
+
+/*
+ * Local service path use:
+ * Specify how the path(s) will be used.
+ * ALL - For connected CM operation (6 pathrecords)
+ * UNIDIRECTIONAL - For unidirectional UD (1 pathrecord)
+ * GMP - For miscellaneous GMP like operation (at least 1 reversible
+ * pathrecord)
+ */
+enum {
+ LS_RESOLVE_PATH_USE_ALL = 0,
+ LS_RESOLVE_PATH_USE_UNIDIRECTIONAL,
+ LS_RESOLVE_PATH_USE_GMP,
+ LS_RESOLVE_PATH_USE_MAX
+};
+
+#define LS_DEVICE_NAME_MAX 64
+
+struct rdma_ls_resolve_header {
+ __u8 device_name[LS_DEVICE_NAME_MAX];
+ __u8 port_num;
+ __u8 path_use;
+};
+
+/* Local service attribute type */
+#define RDMA_NLA_F_MANDATORY (1 << 13)
+#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
+ RDMA_NLA_F_MANDATORY))
+
+/*
+ * Local service attributes:
+ * Attr Name Size Byte order
+ * -----------------------------------------------------
+ * PATH_RECORD struct ib_path_rec_data
+ * TIMEOUT u32 cpu
+ * SERVICE_ID u64 cpu
+ * DGID u8[16] BE
+ * SGID u8[16] BE
+ * TCLASS u8
+ * PKEY u16 cpu
+ * QOS_CLASS u16 cpu
+ */
+enum {
+ LS_NLA_TYPE_UNSPEC = 0,
+ LS_NLA_TYPE_PATH_RECORD,
+ LS_NLA_TYPE_TIMEOUT,
+ LS_NLA_TYPE_SERVICE_ID,
+ LS_NLA_TYPE_DGID,
+ LS_NLA_TYPE_SGID,
+ LS_NLA_TYPE_TCLASS,
+ LS_NLA_TYPE_PKEY,
+ LS_NLA_TYPE_QOS_CLASS,
+ LS_NLA_TYPE_MAX
+};
+
+/* Local service DGID/SGID attribute: big endian */
+struct rdma_nla_ls_gid {
+ __u8 gid[16];
+};
#endif /* _UAPI_RDMA_NETLINK_H */
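A small sketch of decoding a local-service attribute type with the masks above; NLA_F_NESTED and NLA_F_NET_BYTEORDER come from <linux/netlink.h>:

#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

/* Split an attribute type into its base LS_NLA_TYPE_* value and the
 * mandatory flag. */
static int ls_attr_base_type(unsigned short nla_type, int *mandatory)
{
	*mandatory = !!(nla_type & RDMA_NLA_F_MANDATORY);
	return nla_type & RDMA_NLA_TYPE_MASK;
}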
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 75746d52f..d791e0ad5 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -3,3 +3,4 @@ header-y += fc/
header-y += scsi_bsg_fc.h
header-y += scsi_netlink.h
header-y += scsi_netlink_fc.h
+header-y += cxlflash_ioctl.h
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
new file mode 100644
index 000000000..831351b2e
--- /dev/null
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -0,0 +1,174 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_IOCTL_H
+#define _CXLFLASH_IOCTL_H
+
+#include <linux/types.h>
+
+/*
+ * Structure and flag definitions CXL Flash superpipe ioctls
+ */
+
+#define DK_CXLFLASH_VERSION_0 0
+
+struct dk_cxlflash_hdr {
+ __u16 version; /* Version data */
+ __u16 rsvd[3]; /* Reserved for future use */
+ __u64 flags; /* Input flags */
+ __u64 return_flags; /* Returned flags */
+};
+
+/*
+ * Notes:
+ * -----
+ * The 'context_id' field of all ioctl structures contains the context
+ * identifier for a context in the lower 32-bits (upper 32-bits are not
+ * to be used when identifying a context to the AFU). That said, the value
+ * in its entirety (all 64-bits) is to be treated as an opaque cookie and
+ * should be presented as such when issuing ioctls.
+ *
+ * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
+ * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
+ * the fcntl.h header file.
+ */
+#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL
+
+struct dk_cxlflash_attach {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 num_interrupts; /* Requested number of interrupts */
+ __u64 context_id; /* Returned context */
+ __u64 mmio_size; /* Returned size of MMIO area */
+ __u64 block_size; /* Returned block size, in bytes */
+ __u64 adap_fd; /* Returned adapter file descriptor */
+ __u64 last_lba; /* Returned last LBA on the device */
+ __u64 max_xfer; /* Returned max transfer size, blocks */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+struct dk_cxlflash_detach {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context to detach */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+struct dk_cxlflash_udirect {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context to own physical resources */
+ __u64 rsrc_handle; /* Returned resource handle */
+ __u64 last_lba; /* Returned last LBA on the device */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+#define DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME 0x8000000000000000ULL
+
+struct dk_cxlflash_uvirtual {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context to own virtual resources */
+ __u64 lun_size; /* Requested size, in 4K blocks */
+ __u64 rsrc_handle; /* Returned resource handle */
+ __u64 last_lba; /* Returned last LBA of LUN */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+struct dk_cxlflash_release {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context owning resources */
+ __u64 rsrc_handle; /* Resource handle to release */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+struct dk_cxlflash_resize {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context owning resources */
+ __u64 rsrc_handle; /* Resource handle of LUN to resize */
+ __u64 req_size; /* New requested size, in 4K blocks */
+ __u64 last_lba; /* Returned last LBA of LUN */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+struct dk_cxlflash_clone {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id_src; /* Context to clone from */
+ __u64 context_id_dst; /* Context to clone to */
+ __u64 adap_fd_src; /* Source context adapter fd */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+#define DK_CXLFLASH_VERIFY_SENSE_LEN 18
+#define DK_CXLFLASH_VERIFY_HINT_SENSE 0x8000000000000000ULL
+
+struct dk_cxlflash_verify {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 context_id; /* Context owning resources to verify */
+ __u64 rsrc_handle; /* Resource handle of LUN */
+ __u64 hint; /* Reasons for verify */
+ __u64 last_lba; /* Returned last LBA of device */
+ __u8 sense_data[DK_CXLFLASH_VERIFY_SENSE_LEN]; /* SCSI sense data */
+ __u8 pad[6]; /* Pad to next 8-byte boundary */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+#define DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET 0x8000000000000000ULL
+
+struct dk_cxlflash_recover_afu {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u64 reason; /* Reason for recovery request */
+ __u64 context_id; /* Context to recover / updated ID */
+ __u64 mmio_size; /* Returned size of MMIO area */
+ __u64 adap_fd; /* Returned adapter file descriptor */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN 16
+#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE 0x8000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE 0x4000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE 0x2000000000000000ULL
+
+struct dk_cxlflash_manage_lun {
+ struct dk_cxlflash_hdr hdr; /* Common fields */
+ __u8 wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN]; /* Page83 WWID, NAA-6 */
+ __u64 reserved[8]; /* Rsvd, future use */
+};
+
+union cxlflash_ioctls {
+ struct dk_cxlflash_attach attach;
+ struct dk_cxlflash_detach detach;
+ struct dk_cxlflash_udirect udirect;
+ struct dk_cxlflash_uvirtual uvirtual;
+ struct dk_cxlflash_release release;
+ struct dk_cxlflash_resize resize;
+ struct dk_cxlflash_clone clone;
+ struct dk_cxlflash_verify verify;
+ struct dk_cxlflash_recover_afu recover_afu;
+ struct dk_cxlflash_manage_lun manage_lun;
+};
+
+#define MAX_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ioctls))
+
+#define CXL_MAGIC 0xCA
+#define CXL_IOWR(_n, _s) _IOWR(CXL_MAGIC, _n, struct _s)
+
+#define DK_CXLFLASH_ATTACH CXL_IOWR(0x80, dk_cxlflash_attach)
+#define DK_CXLFLASH_USER_DIRECT CXL_IOWR(0x81, dk_cxlflash_udirect)
+#define DK_CXLFLASH_RELEASE CXL_IOWR(0x82, dk_cxlflash_release)
+#define DK_CXLFLASH_DETACH CXL_IOWR(0x83, dk_cxlflash_detach)
+#define DK_CXLFLASH_VERIFY CXL_IOWR(0x84, dk_cxlflash_verify)
+#define DK_CXLFLASH_RECOVER_AFU CXL_IOWR(0x85, dk_cxlflash_recover_afu)
+#define DK_CXLFLASH_MANAGE_LUN CXL_IOWR(0x86, dk_cxlflash_manage_lun)
+#define DK_CXLFLASH_USER_VIRTUAL CXL_IOWR(0x87, dk_cxlflash_uvirtual)
+#define DK_CXLFLASH_VLUN_RESIZE CXL_IOWR(0x88, dk_cxlflash_resize)
+#define DK_CXLFLASH_VLUN_CLONE CXL_IOWR(0x89, dk_cxlflash_clone)
+
+#endif /* ifndef _CXLFLASH_IOCTL_H */
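A minimal sketch of the superpipe attach path; the device node is illustrative, the returned context_id is treated as an opaque cookie per the notes above, and the read/write access convention mentioned there is left out of this sketch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

/* Attach a user context; on success the open fd is returned for the
 * follow-on DK_CXLFLASH_* calls and *att holds the returned fields. */
static int cxlflash_attach(const char *dev, struct dk_cxlflash_attach *att)
{
	int fd = open(dev, O_RDWR);	/* disk node exported by cxlflash */

	if (fd < 0)
		return -1;
	memset(att, 0, sizeof(*att));
	att->hdr.version = DK_CXLFLASH_VERSION_0;
	att->num_interrupts = 4;	/* illustrative request */
	if (ioctl(fd, DK_CXLFLASH_ATTACH, att)) {
		close(fd);
		return -1;
	}
	return fd;
}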
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index a85316811..7ddeeda93 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
struct privcmd_mmap_entry {
__u64 va;
+ /*
+ * This should be a GFN. It's not possible to change the name because
+ * it is exposed to user-space.
+ */
__u64 mfn;
__u64 npages;
};
diff --git a/include/video/kyro.h b/include/video/kyro.h
index c563968e9..b958c2e9c 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -35,9 +35,7 @@ struct kyrofb_info {
/* Useful to hold depth here for Linux */
u8 PIXDEPTH;
-#ifdef CONFIG_MTRR
- int mtrr_handle;
-#endif
+ int wc_cookie;
};
extern int kyro_dev_init(void);
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a4c..d8fc96ed1 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
/* Video buffer addresses */
#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff) (0x40A0 + ((_buff) * 8))
#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
#define VIDW_BUF_END(_buff) (0xD0 + ((_buff) * 8))
#define VIDW_BUF_END1(_buff) (0xD4 + ((_buff) * 8))
diff --git a/include/video/vga.h b/include/video/vga.h
index cac567f22..d334e64c1 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -18,7 +18,7 @@
#define __linux_video_vga_h__
#include <linux/types.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/vga.h>
#include <asm/byteorder.h>
diff --git a/include/xen/events.h b/include/xen/events.h
index 7d95fdf9c..88da2abaf 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -92,7 +92,6 @@ void xen_hvm_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
#endif
-extern int xen_have_vector_callback;
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 70054cc07..252ffd480 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -156,7 +156,9 @@ struct xen_netif_tx_request {
/* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
-#define XEN_NETIF_EXTRA_TYPE_MAX (2)
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MAX (4)
/* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -201,6 +203,10 @@ struct xen_netif_extra_info {
uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
} gso;
+ struct {
+ uint8_t addr[6]; /* Address to add/remove. */
+ } mcast;
+
uint16_t pad[3];
} u;
};
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5cc49ea8d..8e0358713 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -474,6 +474,23 @@ struct xenpf_core_parking {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
+#define XENPF_get_symbol 63
+struct xenpf_symdata {
+ /* IN/OUT variables */
+ uint32_t namelen; /* size of 'name' buffer */
+
+ /* IN/OUT variables */
+ uint32_t symnum; /* IN: Symbol to read */
+ /* OUT: Next available symbol. If same as IN */
+ /* then we reached the end */
+
+ /* OUT variables */
+ GUEST_HANDLE(char) name;
+ uint64_t address;
+ char type;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -495,6 +512,7 @@ struct xen_platform_op {
struct xenpf_cpu_hotadd cpu_add;
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
+ struct xenpf_symdata symdata;
uint8_t pad[128];
} u;
};
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index a48378958..167071c29 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -80,6 +80,7 @@
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op 40
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@@ -112,6 +113,7 @@
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -585,26 +587,29 @@ struct shared_info {
};
/*
- * Start-of-day memory layout for the initial domain (DOM0):
+ * Start-of-day memory layout
+ *
* 1. The domain is started within contiguous virtual-memory region.
* 2. The contiguous region begins and ends on an aligned 4MB boundary.
- * 3. The region start corresponds to the load address of the OS image.
- * If the load address is not 4MB aligned then the address is rounded down.
- * 4. This the order of bootstrap elements in the initial virtual region:
+ * 3. This is the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
+ * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
- * e. bootstrap page tables [pt_base, CR3 (x86)]
- * f. bootstrap stack [register ESP (x86)]
- * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
- * 6. The initial ram disk may be omitted.
- * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base, CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
- * 8. All bootstrap elements are mapped read-writable for the guest OS. The
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
- * 9. There is guaranteed to be at least 512kB padding after the final
+ * 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
*/
@@ -641,10 +646,12 @@ struct start_info {
};
/* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
-#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
new file mode 100644
index 000000000..139efc91b
--- /dev/null
+++ b/include/xen/interface/xenpmu.h
@@ -0,0 +1,94 @@
+#ifndef __XEN_PUBLIC_XENPMU_H__
+#define __XEN_PUBLIC_XENPMU_H__
+
+#include "xen.h"
+
+#define XENPMU_VER_MAJ 0
+#define XENPMU_VER_MIN 1
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
+ *
+ * @cmd == XENPMU_* (PMU operation)
+ * @args == struct xenpmu_params
+ */
+/* ` enum xenpmu_op { */
+#define XENPMU_mode_get 0 /* Also used for getting PMU version */
+#define XENPMU_mode_set 1
+#define XENPMU_feature_get 2
+#define XENPMU_feature_set 3
+#define XENPMU_init 4
+#define XENPMU_finish 5
+#define XENPMU_lvtpc_set 6
+#define XENPMU_flush 7
+
+/* ` } */
+
+/* Parameters structure for HYPERVISOR_xenpmu_op call */
+struct xen_pmu_params {
+ /* IN/OUT parameters */
+ struct {
+ uint32_t maj;
+ uint32_t min;
+ } version;
+ uint64_t val;
+
+ /* IN parameters */
+ uint32_t vcpu;
+ uint32_t pad;
+};
+
+/* PMU modes:
+ * - XENPMU_MODE_OFF: No PMU virtualization
+ * - XENPMU_MODE_SELF: Guests can profile themselves
+ * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
+ * itself and Xen
+ * - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
+ * everyone: itself, the hypervisor and the guests.
+ */
+#define XENPMU_MODE_OFF 0
+#define XENPMU_MODE_SELF (1<<0)
+#define XENPMU_MODE_HV (1<<1)
+#define XENPMU_MODE_ALL (1<<2)
+
+/*
+ * PMU features:
+ * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
+ */
+#define XENPMU_FEATURE_INTEL_BTS 1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during PMU interrupt and sends an
+ * interrupt to appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest but some fields in xen_pmu_arch can be writable
+ * by both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+ /* Interrupted VCPU */
+ uint32_t vcpu_id;
+
+ /*
+ * Physical processor on which the interrupt occurred. On non-privileged
+ * guests set to vcpu_id;
+ */
+ uint32_t pcpu_id;
+
+ /*
+ * Domain that was interrupted. On non-privileged guests set to
+ * DOMID_SELF.
+ * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
+ * XENPMU_MODE_ALL mode, domain ID of another domain.
+ */
+ domid_t domain_id;
+
+ uint8_t pad[6];
+
+ /* Architecture-specific information */
+ struct xen_pmu_arch pmu;
+};
+
+#endif /* __XEN_PUBLIC_XENPMU_H__ */
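A kernel-side sketch of switching the vPMU into self-profiling mode with the parameter layout above; it assumes a HYPERVISOR_xenpmu_op() hypercall wrapper as named in the comment block, which is not part of this header:

#include <xen/interface/xenpmu.h>

/* Kernel-side: ask the hypervisor to let this (PV) guest profile itself. */
static int xenpmu_enable_self_profiling(void)
{
	struct xen_pmu_params xp = {
		.version = { .maj = XENPMU_VER_MAJ, .min = XENPMU_VER_MIN },
		.val = XENPMU_MODE_SELF,
	};

	return HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp);
}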
diff --git a/include/xen/page.h b/include/xen/page.h
index c5ed20bb3..1daae485e 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,14 +3,14 @@
#include <asm/xen/page.h>
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long xen_page_to_gfn(struct page *page)
{
- return pfn_to_mfn(page_to_pfn(page));
+ return pfn_to_gfn(page_to_pfn(page));
}
struct xen_memory_region {
- phys_addr_t start;
- phys_addr_t size;
+ unsigned long start_pfn;
+ unsigned long n_pfns;
};
#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0ce4f3201..e4e214a5a 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
/*
- * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages);
-/* xen_remap_domain_mfn_range() - map a range of foreign frames
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
* @gfn: First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,