Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 85
-rw-r--r--  include/linux/alarmtimer.h | 6
-rw-r--r--  include/linux/ata.h | 41
-rw-r--r--  include/linux/ath9k_platform.h | 1
-rw-r--r--  include/linux/atomic.h | 747
-rw-r--r--  include/linux/audit.h | 2
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/backing-dev.h | 2
-rw-r--r--  include/linux/balloon_compaction.h | 51
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 3
-rw-r--r--  include/linux/binfmts.h | 2
-rw-r--r--  include/linux/bio.h | 97
-rw-r--r--  include/linux/bitmap.h | 9
-rw-r--r--  include/linux/blk-cgroup.h | 13
-rw-r--r--  include/linux/blk-mq.h | 5
-rw-r--r--  include/linux/blk_types.h | 105
-rw-r--r--  include/linux/blkdev.h | 91
-rw-r--r--  include/linux/blktrace_api.h | 2
-rw-r--r--  include/linux/bpf.h | 45
-rw-r--r--  include/linux/buffer_head.h | 14
-rw-r--r--  include/linux/bvec.h | 97
-rw-r--r--  include/linux/can/dev.h | 3
-rw-r--r--  include/linux/capability.h | 1
-rw-r--r--  include/linux/cec-funcs.h | 1971
-rw-r--r--  include/linux/cec.h | 1014
-rw-r--r--  include/linux/ceph/ceph_fs.h | 55
-rw-r--r--  include/linux/ceph/decode.h | 55
-rw-r--r--  include/linux/ceph/libceph.h | 1
-rw-r--r--  include/linux/ceph/mon_client.h | 7
-rw-r--r--  include/linux/ceph/msgpool.h | 1
-rw-r--r--  include/linux/ceph/osd_client.h | 1
-rw-r--r--  include/linux/ceph/osdmap.h | 10
-rw-r--r--  include/linux/ceph/string_table.h | 62
-rw-r--r--  include/linux/cgroup.h | 1
-rw-r--r--  include/linux/clk-provider.h | 3
-rw-r--r--  include/linux/clk.h | 20
-rw-r--r--  include/linux/clocksource.h | 2
-rw-r--r--  include/linux/compaction.h | 34
-rw-r--r--  include/linux/compiler-gcc.h | 10
-rw-r--r--  include/linux/compiler.h | 24
-rw-r--r--  include/linux/console.h | 13
-rw-r--r--  include/linux/console_struct.h | 38
-rw-r--r--  include/linux/context_tracking.h | 53
-rw-r--r--  include/linux/cpu.h | 11
-rw-r--r--  include/linux/cpufreq.h | 289
-rw-r--r--  include/linux/cpuhotplug.h | 78
-rw-r--r--  include/linux/cpuidle.h | 18
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/crypto.h | 31
-rw-r--r--  include/linux/dax.h | 5
-rw-r--r--  include/linux/dcache.h | 45
-rw-r--r--  include/linux/debugobjects.h | 2
-rw-r--r--  include/linux/device-mapper.h | 26
-rw-r--r--  include/linux/dm-io.h | 3
-rw-r--r--  include/linux/dma-buf.h | 2
-rw-r--r--  include/linux/dma-iommu.h | 6
-rw-r--r--  include/linux/dma-mapping.h | 130
-rw-r--r--  include/linux/dma/hsu.h | 14
-rw-r--r--  include/linux/drbd.h | 10
-rw-r--r--  include/linux/drbd_genl.h | 7
-rw-r--r--  include/linux/drbd_limits.h | 15
-rw-r--r--  include/linux/dynamic_debug.h | 60
-rw-r--r--  include/linux/efi.h | 201
-rw-r--r--  include/linux/elevator.h | 15
-rw-r--r--  include/linux/etherdevice.h | 23
-rw-r--r--  include/linux/export.h | 2
-rw-r--r--  include/linux/exportfs.h | 16
-rw-r--r--  include/linux/extable.h | 32
-rw-r--r--  include/linux/extcon.h | 115
-rw-r--r--  include/linux/extcon/extcon-adc-jack.h | 2
-rw-r--r--  include/linux/fence-array.h | 73
-rw-r--r--  include/linux/fence.h | 19
-rw-r--r--  include/linux/filter.h | 18
-rw-r--r--  include/linux/firmware.h | 8
-rw-r--r--  include/linux/frontswap.h | 34
-rw-r--r--  include/linux/fs.h | 163
-rw-r--r--  include/linux/fscrypto.h | 5
-rw-r--r--  include/linux/fsnotify.h | 12
-rw-r--r--  include/linux/fsnotify_backend.h | 20
-rw-r--r--  include/linux/ftrace.h | 12
-rw-r--r--  include/linux/genhd.h | 8
-rw-r--r--  include/linux/gfp.h | 30
-rw-r--r--  include/linux/host1x.h | 2
-rw-r--r--  include/linux/hrtimer.h | 7
-rw-r--r--  include/linux/hsi/hsi.h | 2
-rw-r--r--  include/linux/huge_mm.h | 38
-rw-r--r--  include/linux/i2c-smbus.h | 29
-rw-r--r--  include/linux/i2c.h | 15
-rw-r--r--  include/linux/icmpv6.h | 5
-rw-r--r--  include/linux/ieee80211.h | 32
-rw-r--r--  include/linux/ieee802154.h | 29
-rw-r--r--  include/linux/iio/common/st_sensors.h | 6
-rw-r--r--  include/linux/iio/iio.h | 22
-rw-r--r--  include/linux/iio/sw_device.h | 70
-rw-r--r--  include/linux/init.h | 6
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/input.h | 2
-rw-r--r--  include/linux/input/touchscreen.h | 21
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/io-mapping.h | 10
-rw-r--r--  include/linux/iomap.h | 76
-rw-r--r--  include/linux/iommu.h | 3
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/ipmi.h | 2
-rw-r--r--  include/linux/ipv6.h | 7
-rw-r--r--  include/linux/irq.h | 16
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 216
-rw-r--r--  include/linux/irqchip/arm-gic.h | 11
-rw-r--r--  include/linux/irqdomain.h | 12
-rw-r--r--  include/linux/jbd2.h | 23
-rw-r--r--  include/linux/jump_label.h | 46
-rw-r--r--  include/linux/kasan.h | 5
-rw-r--r--  include/linux/kconfig.h | 31
-rw-r--r--  include/linux/kdb.h | 2
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kernel_stat.h | 1
-rw-r--r--  include/linux/kexec.h | 46
-rw-r--r--  include/linux/khugepaged.h | 5
-rw-r--r--  include/linux/ksm.h | 47
-rw-r--r--  include/linux/kvm_host.h | 82
-rw-r--r--  include/linux/leds-lp3952.h | 125
-rw-r--r--  include/linux/leds-pca9532.h | 9
-rw-r--r--  include/linux/leds.h | 14
-rw-r--r--  include/linux/libata.h | 60
-rw-r--r--  include/linux/libnvdimm.h | 24
-rw-r--r--  include/linux/lightnvm.h | 34
-rw-r--r--  include/linux/list.h | 10
-rw-r--r--  include/linux/lsm_hooks.h | 2
-rw-r--r--  include/linux/mailbox/brcm-message.h | 56
-rw-r--r--  include/linux/mbus.h | 2
-rw-r--r--  include/linux/mc146818rtc.h | 5
-rw-r--r--  include/linux/mdio-mux.h | 4
-rw-r--r--  include/linux/memblock.h | 21
-rw-r--r--  include/linux/memcontrol.h | 173
-rw-r--r--  include/linux/memory_hotplug.h | 2
-rw-r--r--  include/linux/memremap.h | 2
-rw-r--r--  include/linux/mfd/88pm80x.h | 4
-rw-r--r--  include/linux/mfd/abx500/ab8500-sysctrl.h | 6
-rw-r--r--  include/linux/mfd/altera-a10sr.h | 85
-rw-r--r--  include/linux/mfd/arizona/core.h | 10
-rw-r--r--  include/linux/mfd/arizona/registers.h | 6
-rw-r--r--  include/linux/mfd/cros_ec_commands.h | 31
-rw-r--r--  include/linux/mfd/da8xx-cfgchip.h | 153
-rw-r--r--  include/linux/mfd/dbx500-prcmu.h | 10
-rw-r--r--  include/linux/mfd/hi655x-pmic.h | 25
-rw-r--r--  include/linux/mfd/rn5t618.h | 13
-rw-r--r--  include/linux/mfd/stmpe.h | 22
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h | 2
-rw-r--r--  include/linux/mfd/tps65217.h | 1
-rw-r--r--  include/linux/mfd/tps65218.h | 2
-rw-r--r--  include/linux/mfd/twl6040.h | 5
-rw-r--r--  include/linux/micrel_phy.h | 1
-rw-r--r--  include/linux/migrate.h | 17
-rw-r--r--  include/linux/mlx4/device.h | 9
-rw-r--r--  include/linux/mlx4/qp.h | 18
-rw-r--r--  include/linux/mlx5/cq.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 11
-rw-r--r--  include/linux/mlx5/driver.h | 54
-rw-r--r--  include/linux/mlx5/fs.h | 12
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 294
-rw-r--r--  include/linux/mlx5/port.h | 16
-rw-r--r--  include/linux/mlx5/srq.h | 25
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mm.h | 71
-rw-r--r--  include/linux/mm_inline.h | 19
-rw-r--r--  include/linux/mm_types.h | 81
-rw-r--r--  include/linux/mman.h | 2
-rw-r--r--  include/linux/mmc/card.h | 36
-rw-r--r--  include/linux/mmc/dw_mmc.h | 9
-rw-r--r--  include/linux/mmc/host.h | 14
-rw-r--r--  include/linux/mmc/mmc.h | 3
-rw-r--r--  include/linux/mmdebug.h | 2
-rw-r--r--  include/linux/mmzone.h | 199
-rw-r--r--  include/linux/mod_devicetable.h | 16
-rw-r--r--  include/linux/module.h | 37
-rw-r--r--  include/linux/mount.h | 1
-rw-r--r--  include/linux/mpi.h | 3
-rw-r--r--  include/linux/mroute.h | 3
-rw-r--r--  include/linux/mroute6.h | 3
-rw-r--r--  include/linux/msi.h | 2
-rw-r--r--  include/linux/mtd/nand.h | 1
-rw-r--r--  include/linux/mtd/spi-nor.h | 8
-rw-r--r--  include/linux/namei.h | 2
-rw-r--r--  include/linux/nd.h | 3
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/netdev_features.h | 7
-rw-r--r--  include/linux/netdevice.h | 159
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 4
-rw-r--r--  include/linux/netfilter/x_tables.h | 8
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 2
-rw-r--r--  include/linux/nfs4.h | 11
-rw-r--r--  include/linux/nfs_fs.h | 3
-rw-r--r--  include/linux/nfs_xdr.h | 24
-rw-r--r--  include/linux/nvme-rdma.h | 71
-rw-r--r--  include/linux/nvme.h | 406
-rw-r--r--  include/linux/nvmem-consumer.h | 2
-rw-r--r--  include/linux/of.h | 19
-rw-r--r--  include/linux/of_fdt.h | 2
-rw-r--r--  include/linux/of_iommu.h | 2
-rw-r--r--  include/linux/of_mdio.h | 18
-rw-r--r--  include/linux/of_reserved_mem.h | 25
-rw-r--r--  include/linux/oom.h | 36
-rw-r--r--  include/linux/page-flags.h | 153
-rw-r--r--  include/linux/page_ext.h | 4
-rw-r--r--  include/linux/page_owner.h | 12
-rw-r--r--  include/linux/page_ref.h | 9
-rw-r--r--  include/linux/pagemap.h | 9
-rw-r--r--  include/linux/pci-acpi.h | 2
-rw-r--r--  include/linux/pci-ecam.h | 67
-rw-r--r--  include/linux/pci.h | 105
-rw-r--r--  include/linux/percpu-refcount.h | 12
-rw-r--r--  include/linux/perf/arm_pmu.h | 2
-rw-r--r--  include/linux/perf_event.h | 83
-rw-r--r--  include/linux/pfn_t.h | 5
-rw-r--r--  include/linux/phy/phy.h | 17
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h | 2
-rw-r--r--  include/linux/platform_data/asoc-ti-mcbsp.h | 4
-rw-r--r--  include/linux/platform_data/at24.h | 11
-rw-r--r--  include/linux/platform_data/b53.h | 33
-rw-r--r--  include/linux/platform_data/media/ir-rx51.h | 3
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h | 1
-rw-r--r--  include/linux/platform_data/omapdss.h | 37
-rw-r--r--  include/linux/platform_data/rtc-ds2404.h | 20
-rw-r--r--  include/linux/platform_data/rtc-m48t86.h | 16
-rw-r--r--  include/linux/platform_data/rtc-v3020.h | 41
-rw-r--r--  include/linux/platform_data/sht3x.h | 25
-rw-r--r--  include/linux/platform_data/spi-s3c64xx.h | 1
-rw-r--r--  include/linux/platform_data/st33zp24.h | 2
-rw-r--r--  include/linux/pm_clock.h | 1
-rw-r--r--  include/linux/pm_domain.h | 10
-rw-r--r--  include/linux/pmem.h | 117
-rw-r--r--  include/linux/posix_acl.h | 2
-rw-r--r--  include/linux/power/max8903_charger.h | 6
-rw-r--r--  include/linux/power_supply.h | 1
-rw-r--r--  include/linux/printk.h | 29
-rw-r--r--  include/linux/property.h | 5
-rw-r--r--  include/linux/pstore.h | 3
-rw-r--r--  include/linux/ptr_ring.h | 448
-rw-r--r--  include/linux/pwm.h | 113
-rw-r--r--  include/linux/qcom_scm.h | 8
-rw-r--r--  include/linux/qed/common_hsi.h | 397
-rw-r--r--  include/linux/qed/eth_common.h | 124
-rw-r--r--  include/linux/qed/iscsi_common.h | 1439
-rw-r--r--  include/linux/qed/qed_chain.h | 556
-rw-r--r--  include/linux/qed/qed_eth_if.h | 63
-rw-r--r--  include/linux/qed/qed_if.h | 190
-rw-r--r--  include/linux/qed/rdma_common.h | 44
-rw-r--r--  include/linux/qed/roce_common.h | 17
-rw-r--r--  include/linux/qed/storage_common.h | 91
-rw-r--r--  include/linux/qed/tcp_common.h | 226
-rw-r--r--  include/linux/quota.h | 14
-rw-r--r--  include/linux/radix-tree.h | 3
-rw-r--r--  include/linux/random.h | 12
-rw-r--r--  include/linux/ratelimit.h | 38
-rw-r--r--  include/linux/rbtree.h | 2
-rw-r--r--  include/linux/rbtree_augmented.h | 13
-rw-r--r--  include/linux/rcupdate.h | 31
-rw-r--r--  include/linux/rcutiny.h | 7
-rw-r--r--  include/linux/rcutree.h | 7
-rw-r--r--  include/linux/regmap.h | 49
-rw-r--r--  include/linux/regulator/consumer.h | 6
-rw-r--r--  include/linux/regulator/da9211.h | 5
-rw-r--r--  include/linux/regulator/mt6323-regulator.h | 52
-rw-r--r--  include/linux/reset-controller.h | 4
-rw-r--r--  include/linux/rio.h | 13
-rw-r--r--  include/linux/rio_ids.h | 2
-rw-r--r--  include/linux/rio_regs.h | 167
-rw-r--r--  include/linux/rmap.h | 2
-rw-r--r--  include/linux/rtc/ds1286.h | 52
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/rwsem.h | 8
-rw-r--r--  include/linux/rxrpc.h | 21
-rw-r--r--  include/linux/sched.h | 121
-rw-r--r--  include/linux/scpi_protocol.h | 2
-rw-r--r--  include/linux/sctp.h | 64
-rw-r--r--  include/linux/seccomp.h | 14
-rw-r--r--  include/linux/security.h | 4
-rw-r--r--  include/linux/serial_core.h | 8
-rw-r--r--  include/linux/sfi.h | 1
-rw-r--r--  include/linux/shmem_fs.h | 50
-rw-r--r--  include/linux/skb_array.h | 178
-rw-r--r--  include/linux/skbuff.h | 70
-rw-r--r--  include/linux/slab.h | 14
-rw-r--r--  include/linux/slab_def.h | 5
-rw-r--r--  include/linux/slub_def.h | 19
-rw-r--r--  include/linux/smp.h | 5
-rw-r--r--  include/linux/soc/qcom/wcnss_ctrl.h | 8
-rw-r--r--  include/linux/soc/renesas/rcar-sysc.h | 2
-rw-r--r--  include/linux/spi/spi.h | 10
-rw-r--r--  include/linux/spinlock_up.h | 10
-rw-r--r--  include/linux/sradix-tree.h | 77
-rw-r--r--  include/linux/stmmac.h | 3
-rw-r--r--  include/linux/stringhash.h | 12
-rw-r--r--  include/linux/sunrpc/auth.h | 9
-rw-r--r--  include/linux/sunrpc/cache.h | 2
-rw-r--r--  include/linux/sunrpc/clnt.h | 2
-rw-r--r--  include/linux/sunrpc/gss_api.h | 2
-rw-r--r--  include/linux/sunrpc/sched.h | 5
-rw-r--r--  include/linux/sunrpc/svc.h | 1
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 2
-rw-r--r--  include/linux/sunrpc/svcauth.h | 4
-rw-r--r--  include/linux/sunrpc/xprt.h | 3
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 1
-rw-r--r--  include/linux/suspend.h | 73
-rw-r--r--  include/linux/swap.h | 32
-rw-r--r--  include/linux/swiotlb.h | 10
-rw-r--r--  include/linux/sysctl.h | 3
-rw-r--r--  include/linux/thread_info.h | 60
-rw-r--r--  include/linux/ti_wilink_st.h | 2
-rw-r--r--  include/linux/time.h | 15
-rw-r--r--  include/linux/timer.h | 40
-rw-r--r--  include/linux/topology.h | 2
-rw-r--r--  include/linux/torture.h | 4
-rw-r--r--  include/linux/tpm.h | 5
-rw-r--r--  include/linux/uaccess.h | 4
-rw-r--r--  include/linux/uidgid.h | 4
-rw-r--r--  include/linux/uksm.h | 149
-rw-r--r--  include/linux/usb/gadget.h | 589
-rw-r--r--  include/linux/usb/of.h | 4
-rw-r--r--  include/linux/user_namespace.h | 6
-rw-r--r--  include/linux/userfaultfd_k.h | 8
-rw-r--r--  include/linux/vga_switcheroo.h | 2
-rw-r--r--  include/linux/virtio_config.h | 13
-rw-r--r--  include/linux/virtio_net.h | 101
-rw-r--r--  include/linux/virtio_vsock.h | 154
-rw-r--r--  include/linux/vm_event_item.h | 21
-rw-r--r--  include/linux/vmstat.h | 111
-rw-r--r--  include/linux/vt_kern.h | 7
-rw-r--r--  include/linux/vtime.h | 50
-rw-r--r--  include/linux/wait.h | 13
-rw-r--r--  include/linux/watchdog.h | 6
-rw-r--r--  include/linux/wbt.h | 75
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/linux/writeback.h | 9
-rw-r--r--  include/linux/ww_mutex.h | 4
335 files changed, 13891 insertions, 3474 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 47f950856..c5eaf2f80 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -208,7 +208,6 @@ void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
-void early_acpi_table_init(void *data, size_t size);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_parse_entries(char *id, unsigned long table_size,
@@ -232,12 +231,26 @@ int acpi_table_parse_madt(enum acpi_madt_type id,
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following four functions are architecture-dependent */
+/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
+
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+#else
+static inline void
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
+#endif
+
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+
+#ifdef CONFIG_ARM64
+void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
+#else
+static inline void
+acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
+#endif
+
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
-void acpi_numa_arch_fixup(void);
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
@@ -444,8 +457,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008
#define OSC_SB_APEI_SUPPORT 0x00000010
#define OSC_SB_CPC_SUPPORT 0x00000020
+#define OSC_SB_CPCV2_SUPPORT 0x00000040
+#define OSC_SB_PCLPI_SUPPORT 0x00000080
+#define OSC_SB_OSLPI_SUPPORT 0x00000100
extern bool osc_sb_apei_support_acked;
+extern bool osc_pc_lpi_support_confirmed;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -532,6 +549,24 @@ void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_PTR(_ptr) (_ptr)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+ adev->flags.visited = true;
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+ adev->flags.visited = false;
+}
+
+enum acpi_reconfig_event {
+ ACPI_RECONFIG_DEVICE_ADD = 0,
+ ACPI_RECONFIG_DEVICE_REMOVE,
+};
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb);
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -543,6 +578,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
struct fwnode_handle;
+static inline bool acpi_dev_found(const char *hid)
+{
+ return false;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -568,6 +608,12 @@ static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwn
return NULL;
}
+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ return false;
+}
+
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return NULL;
@@ -588,7 +634,6 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return NULL;
}
-static inline void early_acpi_table_init(void *data, size_t size) { }
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
@@ -654,6 +699,14 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
+ const u8 *uuid,
+ int rev, int func,
+ union acpi_object *argv4)
+{
+ return NULL;
+}
+
static inline int acpi_device_uevent_modalias(struct device *dev,
struct kobj_uevent_env *env)
{
@@ -678,6 +731,24 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
#define ACPI_PTR(_ptr) (NULL)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
+static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI
@@ -997,4 +1068,10 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
#define acpi_probe_device_table(t) ({ int __r = 0; __r;})
#endif
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+void acpi_table_upgrade(void);
+#else
+static inline void acpi_table_upgrade(void) { }
+#endif
+
#endif /*_LINUX_ACPI_H*/
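(For context: a minimal, hypothetical sketch of how a consumer might use the table-reconfiguration notifier added above. The example_ names, and the assumption that the notifier payload is the affected struct acpi_device, are illustrative, not part of this diff.)

#include <linux/acpi.h>
#include <linux/notifier.h>

static int example_acpi_reconfig(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct acpi_device *adev = data;	/* assumed payload */

	switch (action) {
	case ACPI_RECONFIG_DEVICE_ADD:
		pr_info("ACPI device %s added\n", acpi_dev_name(adev));
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		pr_info("ACPI device %s removed\n", acpi_dev_name(adev));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_acpi_nb = {
	.notifier_call = example_acpi_reconfig,
};

/* With CONFIG_ACPI=n the stub above makes this return -EINVAL. */
static int example_register(void)
{
	return acpi_reconfig_notifier_register(&example_acpi_nb);
}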
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 52f3b7da4..9d8031257 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
* struct alarm - Alarm timer structure
* @node: timerqueue node for adding to the event list this value
* also includes the expiration time.
- * @period: Period for recuring alarms
+ * @timer: hrtimer used to schedule events while running
* @function: Function pointer to be executed when the timer fires.
- * @type: Alarm type (BOOTTIME/REALTIME)
- * @enabled: Flag that represents if the alarm is set to fire or not
+ * @type: Alarm type (BOOTTIME/REALTIME).
+ * @state: Flag that represents if the alarm is set to fire or not.
* @data: Internal data value.
*/
struct alarm {
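(A brief usage sketch of the struct alarm interface whose kerneldoc is corrected above; illustrative only, the example_ names are hypothetical.)

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static enum alarmtimer_restart example_alarm_fn(struct alarm *a, ktime_t now)
{
	pr_info("alarm fired\n");
	return ALARMTIMER_NORESTART;	/* do not re-arm */
}

static struct alarm example_alarm;

static void example_arm_alarm(void)
{
	/* Fire five seconds from now on the BOOTTIME clock. */
	alarm_init(&example_alarm, ALARM_BOOTTIME, example_alarm_fn);
	alarm_start(&example_alarm,
		    ktime_add(ktime_get_boottime(), ktime_set(5, 0)));
}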
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 99346be5a..adbc812c0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -46,8 +46,9 @@ enum {
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
- ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
+ ATA_MAX_SECTORS_LBA48 = 65535, /* avoid a count of 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
+ ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
ATA_ID_WORDS = 256,
ATA_ID_CONFIG = 0,
@@ -409,6 +410,9 @@ enum {
SETFEATURES_WC_ON = 0x02, /* Enable write cache */
SETFEATURES_WC_OFF = 0x82, /* Disable write cache */
+ SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */
+ SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */
+
/* Enable/Disable Automatic Acoustic Management */
SETFEATURES_AAM_ON = 0x42,
SETFEATURES_AAM_OFF = 0xC2,
@@ -519,16 +523,23 @@ enum {
SERR_DEV_XCHG = (1 << 26), /* device exchanged */
};
-enum ata_tf_protocols {
- /* ATA taskfile protocols */
- ATA_PROT_UNKNOWN, /* unknown/invalid */
- ATA_PROT_NODATA, /* no data */
- ATA_PROT_PIO, /* PIO data xfer */
- ATA_PROT_DMA, /* DMA */
- ATA_PROT_NCQ, /* NCQ */
- ATAPI_PROT_NODATA, /* packet command, no data */
- ATAPI_PROT_PIO, /* packet command, PIO data xfer*/
- ATAPI_PROT_DMA, /* packet command with special DMA sauce */
+enum ata_prot_flags {
+ /* protocol flags */
+ ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
+ ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
+ ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
+ ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
+
+ /* taskfile protocols */
+ ATA_PROT_UNKNOWN = (u8)-1,
+ ATA_PROT_NODATA = 0,
+ ATA_PROT_PIO = ATA_PROT_FLAG_PIO,
+ ATA_PROT_DMA = ATA_PROT_FLAG_DMA,
+ ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ,
+ ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ,
+ ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI,
+ ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO,
+ ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA,
};
enum ata_ioctls {
@@ -1066,12 +1077,12 @@ static inline void ata_id_to_hd_driveid(u16 *id)
* TO NV CACHE PINNED SET.
*/
static inline unsigned ata_set_lba_range_entries(void *_buffer,
- unsigned buf_size, u64 sector, unsigned long count)
+ unsigned num, u64 sector, unsigned long count)
{
__le64 *buffer = _buffer;
unsigned i = 0, used_bytes;
- while (i < buf_size / 8 ) { /* 6-byte LBA + 2-byte range per entry */
+ while (i < num) {
u64 entry = sector |
((u64)(count > 0xffff ? 0xffff : count) << 48);
buffer[i++] = __cpu_to_le64(entry);
@@ -1095,13 +1106,13 @@ static inline bool ata_ok(u8 status)
static inline bool lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number: must be LESS THAN 0x0fffffff */
- return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
+ return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS);
}
static inline bool lba_48_ok(u64 block, u32 n_block)
{
/* check the ending block number */
- return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
+ return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48);
}
#define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff)
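(For reference, the arithmetic behind ATA_MAX_TRIM_RNUM above: each DSM TRIM range entry is 8 bytes, a 48-bit starting LBA in the low six bytes plus a 16-bit sector count in the high two, so one 512-byte payload block holds 512 / 8 = 64 entries. Below is a sketch of packing a single entry, mirroring the loop body of ata_set_lba_range_entries(); the helper name is hypothetical.)

#include <linux/types.h>
#include <asm/byteorder.h>

static inline __le64 example_trim_entry(u64 sector, unsigned long count)
{
	/* Low 48 bits: starting LBA; high 16 bits: count, clamped to 0xffff. */
	u64 entry = sector | ((u64)(count > 0xffff ? 0xffff : count) << 48);

	return __cpu_to_le64(entry);
}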
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index e66153d60..76860a461 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -40,6 +40,7 @@ struct ath9k_platform_data {
bool tx_gain_buffalo;
bool disable_2ghz;
bool disable_5ghz;
+ bool led_active_high;
int (*get_mac_revision)(void);
int (*external_reset)(void);
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e451534fe..e71835bf6 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -163,206 +163,265 @@
#endif
#endif /* atomic_dec_return_relaxed */
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_relaxed atomic_xchg
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#else /* atomic_xchg_relaxed */
+/* atomic_fetch_add_relaxed */
+#ifndef atomic_fetch_add_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add
+#define atomic_fetch_add_acquire atomic_fetch_add
+#define atomic_fetch_add_release atomic_fetch_add
-#ifndef atomic_xchg_acquire
-#define atomic_xchg_acquire(...) \
- __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#else /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_fetch_add_acquire
+#define atomic_fetch_add_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
#endif
-#ifndef atomic_xchg_release
-#define atomic_xchg_release(...) \
- __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add_release
+#define atomic_fetch_add_release(...) \
+ __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
#endif
-#ifndef atomic_xchg
-#define atomic_xchg(...) \
- __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add
+#define atomic_fetch_add(...) \
+ __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_add_relaxed */
+
+/* atomic_fetch_inc_relaxed */
+#ifndef atomic_fetch_inc_relaxed
+
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(v) atomic_fetch_add(1, (v))
+#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v))
+#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v))
+#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v))
+#else /* atomic_fetch_inc */
+#define atomic_fetch_inc_relaxed atomic_fetch_inc
+#define atomic_fetch_inc_acquire atomic_fetch_inc
+#define atomic_fetch_inc_release atomic_fetch_inc
+#endif /* atomic_fetch_inc */
+
+#else /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_fetch_inc_acquire
+#define atomic_fetch_inc_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
#endif
-#endif /* atomic_xchg_relaxed */
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
+#ifndef atomic_fetch_inc_release
+#define atomic_fetch_inc_release(...) \
+ __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
+#endif
-#else /* atomic_cmpxchg_relaxed */
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(...) \
+ __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_inc_relaxed */
-#ifndef atomic_cmpxchg_acquire
-#define atomic_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+/* atomic_fetch_sub_relaxed */
+#ifndef atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub
+#define atomic_fetch_sub_acquire atomic_fetch_sub
+#define atomic_fetch_sub_release atomic_fetch_sub
+
+#else /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_fetch_sub_acquire
+#define atomic_fetch_sub_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
#endif
-#ifndef atomic_cmpxchg_release
-#define atomic_cmpxchg_release(...) \
- __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub_release
+#define atomic_fetch_sub_release(...) \
+ __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
#endif
-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(...) \
- __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub
+#define atomic_fetch_sub(...) \
+ __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_sub_relaxed */
+
+/* atomic_fetch_dec_relaxed */
+#ifndef atomic_fetch_dec_relaxed
+
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v))
+#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v))
+#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v))
+#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v))
+#else /* atomic_fetch_dec */
+#define atomic_fetch_dec_relaxed atomic_fetch_dec
+#define atomic_fetch_dec_acquire atomic_fetch_dec
+#define atomic_fetch_dec_release atomic_fetch_dec
+#endif /* atomic_fetch_dec */
+
+#else /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_dec_acquire
+#define atomic_fetch_dec_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
#endif
-#endif /* atomic_cmpxchg_relaxed */
-#ifndef atomic64_read_acquire
-#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#ifndef atomic_fetch_dec_release
+#define atomic_fetch_dec_release(...) \
+ __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
#endif
-#ifndef atomic64_set_release
-#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(...) \
+ __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
#endif
+#endif /* atomic_fetch_dec_relaxed */
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
+/* atomic_fetch_or_relaxed */
+#ifndef atomic_fetch_or_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or
+#define atomic_fetch_or_acquire atomic_fetch_or
+#define atomic_fetch_or_release atomic_fetch_or
-#else /* atomic64_add_return_relaxed */
+#else /* atomic_fetch_or_relaxed */
-#ifndef atomic64_add_return_acquire
-#define atomic64_add_return_acquire(...) \
- __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_acquire
+#define atomic_fetch_or_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
#endif
-#ifndef atomic64_add_return_release
-#define atomic64_add_return_release(...) \
- __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_release
+#define atomic_fetch_or_release(...) \
+ __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
#endif
-#ifndef atomic64_add_return
-#define atomic64_add_return(...) \
- __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or
+#define atomic_fetch_or(...) \
+ __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
#endif
-#endif /* atomic64_add_return_relaxed */
+#endif /* atomic_fetch_or_relaxed */
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
+/* atomic_fetch_and_relaxed */
+#ifndef atomic_fetch_and_relaxed
+#define atomic_fetch_and_relaxed atomic_fetch_and
+#define atomic_fetch_and_acquire atomic_fetch_and
+#define atomic_fetch_and_release atomic_fetch_and
-#else /* atomic64_inc_return_relaxed */
+#else /* atomic_fetch_and_relaxed */
-#ifndef atomic64_inc_return_acquire
-#define atomic64_inc_return_acquire(...) \
- __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_acquire
+#define atomic_fetch_and_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
#endif
-#ifndef atomic64_inc_return_release
-#define atomic64_inc_return_release(...) \
- __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_release
+#define atomic_fetch_and_release(...) \
+ __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
#endif
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(...) \
- __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and
+#define atomic_fetch_and(...) \
+ __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
#endif
-#endif /* atomic64_inc_return_relaxed */
-
+#endif /* atomic_fetch_and_relaxed */
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
+#ifdef atomic_andnot
+/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_fetch_andnot_relaxed
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
-#else /* atomic64_sub_return_relaxed */
+#else /* atomic_fetch_andnot_relaxed */
-#ifndef atomic64_sub_return_acquire
-#define atomic64_sub_return_acquire(...) \
- __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_acquire
+#define atomic_fetch_andnot_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
#endif
-#ifndef atomic64_sub_return_release
-#define atomic64_sub_return_release(...) \
- __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_release
+#define atomic_fetch_andnot_release(...) \
+ __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
#endif
-#ifndef atomic64_sub_return
-#define atomic64_sub_return(...) \
- __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(...) \
+ __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
-#endif /* atomic64_sub_return_relaxed */
+#endif /* atomic_fetch_andnot_relaxed */
+#endif /* atomic_andnot */
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
+/* atomic_fetch_xor_relaxed */
+#ifndef atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor
+#define atomic_fetch_xor_acquire atomic_fetch_xor
+#define atomic_fetch_xor_release atomic_fetch_xor
-#else /* atomic64_dec_return_relaxed */
+#else /* atomic_fetch_xor_relaxed */
-#ifndef atomic64_dec_return_acquire
-#define atomic64_dec_return_acquire(...) \
- __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_acquire
+#define atomic_fetch_xor_acquire(...) \
+ __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
#endif
-#ifndef atomic64_dec_return_release
-#define atomic64_dec_return_release(...) \
- __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_release
+#define atomic_fetch_xor_release(...) \
+ __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
#endif
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(...) \
- __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor
+#define atomic_fetch_xor(...) \
+ __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
#endif
-#endif /* atomic64_dec_return_relaxed */
+#endif /* atomic_fetch_xor_relaxed */
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_relaxed atomic64_xchg
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#else /* atomic64_xchg_relaxed */
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed atomic_xchg
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
-#ifndef atomic64_xchg_acquire
-#define atomic64_xchg_acquire(...) \
- __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+ __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
-#ifndef atomic64_xchg_release
-#define atomic64_xchg_release(...) \
- __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+ __atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
-#ifndef atomic64_xchg
-#define atomic64_xchg(...) \
- __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+ __atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
-#endif /* atomic64_xchg_relaxed */
+#endif /* atomic_xchg_relaxed */
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
+#else /* atomic_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_acquire
-#define atomic64_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
-#ifndef atomic64_cmpxchg_release
-#define atomic64_cmpxchg_release(...) \
- __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+ __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
-#ifndef atomic64_cmpxchg
-#define atomic64_cmpxchg(...) \
- __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+ __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
-#endif /* atomic64_cmpxchg_relaxed */
+#endif /* atomic_cmpxchg_relaxed */
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
@@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
-#endif
-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot(int i, atomic_t *v)
{
- atomic_andnot(mask, v);
+ return atomic_fetch_and(~i, v);
}
-static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- atomic_or(mask, v);
+ return atomic_fetch_and_relaxed(~i, v);
}
+static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return atomic_fetch_and_acquire(~i, v);
+}
+
+static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return atomic_fetch_and_release(~i, v);
+}
+#endif
+
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
@@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-/**
- * atomic_fetch_or - perform *p |= mask and return old value of *p
- * @mask: mask to OR on the atomic_t
- * @p: pointer to atomic_t
- */
-#ifndef atomic_fetch_or
-static inline int atomic_fetch_or(int mask, atomic_t *p)
-{
- int old, val = atomic_read(p);
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
- for (;;) {
- old = atomic_cmpxchg(p, val, val | mask);
- if (old == val)
- break;
- val = old;
- }
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
- return old;
-}
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed atomic64_add_return
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+ __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+ __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define atomic64_inc_return_relaxed atomic64_inc_return
+#define atomic64_inc_return_acquire atomic64_inc_return
+#define atomic64_inc_return_release atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define atomic64_inc_return_acquire(...) \
+ __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define atomic64_inc_return_release(...) \
+ __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(...) \
+ __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+ __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+ __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+ __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define atomic64_dec_return_relaxed atomic64_dec_return
+#define atomic64_dec_return_acquire atomic64_dec_return
+#define atomic64_dec_return_release atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define atomic64_dec_return_acquire(...) \
+ __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define atomic64_dec_return_release(...) \
+ __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(...) \
+ __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
+
+/* atomic64_fetch_add_relaxed */
+#ifndef atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add
+#define atomic64_fetch_add_acquire atomic64_fetch_add
+#define atomic64_fetch_add_release atomic64_fetch_add
+
+#else /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_fetch_add_acquire
+#define atomic64_fetch_add_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add_release
+#define atomic64_fetch_add_release(...) \
+ __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add
+#define atomic64_fetch_add(...) \
+ __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_add_relaxed */
+
+/* atomic64_fetch_inc_relaxed */
+#ifndef atomic64_fetch_inc_relaxed
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v))
+#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v))
+#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v))
+#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v))
+#else /* atomic64_fetch_inc */
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc
+#define atomic64_fetch_inc_release atomic64_fetch_inc
+#endif /* atomic64_fetch_inc */
+
+#else /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_fetch_inc_acquire
+#define atomic64_fetch_inc_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc_release
+#define atomic64_fetch_inc_release(...) \
+ __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(...) \
+ __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_inc_relaxed */
+
+/* atomic64_fetch_sub_relaxed */
+#ifndef atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub
+#define atomic64_fetch_sub_release atomic64_fetch_sub
+
+#else /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_fetch_sub_acquire
+#define atomic64_fetch_sub_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub_release
+#define atomic64_fetch_sub_release(...) \
+ __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub
+#define atomic64_fetch_sub(...) \
+ __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_sub_relaxed */
+
+/* atomic64_fetch_dec_relaxed */
+#ifndef atomic64_fetch_dec_relaxed
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v))
+#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v))
+#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v))
+#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v))
+#else /* atomic64_fetch_dec */
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec
+#define atomic64_fetch_dec_release atomic64_fetch_dec
+#endif /* atomic64_fetch_dec */
+
+#else /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_dec_acquire
+#define atomic64_fetch_dec_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec_release
+#define atomic64_fetch_dec_release(...) \
+ __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(...) \
+ __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_dec_relaxed */
+
+/* atomic64_fetch_or_relaxed */
+#ifndef atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or
+#define atomic64_fetch_or_acquire atomic64_fetch_or
+#define atomic64_fetch_or_release atomic64_fetch_or
+
+#else /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_or_acquire
+#define atomic64_fetch_or_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or_release
+#define atomic64_fetch_or_release(...) \
+ __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or
+#define atomic64_fetch_or(...) \
+ __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_or_relaxed */
+
+/* atomic64_fetch_and_relaxed */
+#ifndef atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_relaxed atomic64_fetch_and
+#define atomic64_fetch_and_acquire atomic64_fetch_and
+#define atomic64_fetch_and_release atomic64_fetch_and
+
+#else /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_fetch_and_acquire
+#define atomic64_fetch_and_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and_release
+#define atomic64_fetch_and_release(...) \
+ __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and
+#define atomic64_fetch_and(...) \
+ __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_and_relaxed */
+
+#ifdef atomic64_andnot
+/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#else /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_andnot_acquire
+#define atomic64_fetch_andnot_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+#define atomic64_fetch_andnot_release(...) \
+ __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(...) \
+ __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_andnot_relaxed */
+#endif /* atomic64_andnot */
+
+/* atomic64_fetch_xor_relaxed */
+#ifndef atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor
+#define atomic64_fetch_xor_release atomic64_fetch_xor
+
+#else /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_fetch_xor_acquire
+#define atomic64_fetch_xor_acquire(...) \
+ __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor_release
+#define atomic64_fetch_xor_release(...) \
+ __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor
+#define atomic64_fetch_xor(...) \
+ __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_xor_relaxed */
+
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed atomic64_xchg
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+ __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+ __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+ __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+ __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
atomic64_and(~i, v);
}
+
+static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+{
+ return atomic64_fetch_and(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+{
+ return atomic64_fetch_and_relaxed(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+{
+ return atomic64_fetch_and_acquire(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+{
+ return atomic64_fetch_and_release(~i, v);
+}
#endif
#include <asm-generic/atomic-long.h>
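(To illustrate the fetch_*() family introduced above: a fetch op returns the value the variable held before the update, so a caller can, for example, clear a flag bit and learn whether it was set in a single atomic step. A sketch; the example_ names are hypothetical.)

#include <linux/atomic.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_BUSY	0x1	/* hypothetical flag bit */

static atomic_t example_flags = ATOMIC_INIT(0);

/* Clear BUSY and report whether it was previously set. */
static bool example_clear_busy(void)
{
	int old = atomic_fetch_andnot(EXAMPLE_FLAG_BUSY, &example_flags);

	return old & EXAMPLE_FLAG_BUSY;
}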
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e38e3fc13..9d4443f93 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -163,8 +163,6 @@ extern void audit_log_task_info(struct audit_buffer *ab,
extern int audit_update_lsm_rules(void);
/* Private API (for audit.c only) */
-extern int audit_filter_user(int type);
-extern int audit_filter_type(int type);
extern int audit_rule_change(int type, __u32 portid, int seq,
void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 5a5658857..dc5f76d7f 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,7 +116,7 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
- atomic_t dirty_sleeping; /* waiting on dirty limit exceeded */
+ unsigned long dirty_sleep; /* last wait */
struct list_head bdi_node; /* anchored at bdi->wb_list */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 89d3de3e0..43b93a947 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -198,7 +198,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
}
long congestion_wait(int sync, long timeout);
-long wait_iff_congested(struct zone *zone, int sync, long timeout);
+long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 9b0a15d06..79542b269 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -48,6 +48,7 @@
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/fs.h>
/*
* Balloon device information descriptor.
@@ -62,6 +63,7 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
+ struct inode *inode;
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
+ balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+ isolate_mode_t mode);
extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage,
struct page *page, enum migrate_mode mode);
/*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
- */
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
- * and PagePrivate to check that the page is not
- * isolated and can be moved by compaction/migration.
- *
- * As we might return false positives in the case of a balloon page being just
- * released under us, this need to be re-tested later, under the page lock.
- */
-static inline bool balloon_page_movable(struct page *page)
-{
- return PageBalloon(page) && PagePrivate(page);
-}
-
-/*
- * isolated_balloon_page - identify an isolated balloon page on private
- * compaction/migration page lists.
- */
-static inline bool isolated_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->private assignment accordingly.
* @balloon : pointer to balloon device
@@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageBalloon(page);
- SetPagePrivate(page);
+ __SetPageMovable(page, balloon->inode->i_mapping);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
static inline void balloon_page_delete(struct page *page)
{
__ClearPageBalloon(page);
+ __ClearPageMovable(page);
set_page_private(page, 0);
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
+ /*
+ * Do not touch page->lru once @page has been isolated,
+ * because the VM is using that field.
+ */
+ if (!PageIsolated(page))
list_del(&page->lru);
- }
}
/*
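(A hedged sketch of what the reworked interface above asks of a balloon driver: besides the migratepage callback, the driver now supplies an inode whose mapping is handed to __SetPageMovable() at insertion time. The example_ names are hypothetical.)

#include <linux/balloon_compaction.h>
#include <linux/migrate.h>

static int example_balloon_migratepage(struct balloon_dev_info *info,
				       struct page *newpage,
				       struct page *page,
				       enum migrate_mode mode)
{
	/* move the driver's bookkeeping from @page to @newpage here */
	return MIGRATEPAGE_SUCCESS;
}

static void example_balloon_setup(struct balloon_dev_info *info,
				  struct inode *inode)
{
	balloon_devinfo_init(info);
	info->migratepage = example_balloon_migratepage;
	info->inode = inode;	/* mapping used by balloon_page_insert() */
}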
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index a5ac2cad5..b20e3d562 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -504,6 +504,9 @@
#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+#define BCMA_CCB_MII_MNG_CTL 0x0000
+#define BCMA_CCB_MII_MNG_CMD_DATA 0x0004
+
/* BCM4331 ChipControl numbers. */
#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 314b3caa7..1303b570b 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -113,6 +113,8 @@ extern int suid_dumpable;
extern int setup_arg_pages(struct linux_binprm * bprm,
unsigned long stack_top,
int executable_stack);
+extern int transfer_args_to_stack(struct linux_binprm *bprm,
+ unsigned long *sp_location);
extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 8af9ef2cc..23ddf4b46 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -32,8 +32,6 @@
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
-extern int trap_non_toi_io;
-
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -43,44 +41,9 @@ extern int trap_non_toi_io;
#endif
#define BIO_MAX_PAGES 256
-#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
-#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
-
-/*
- * upper 16 bits of bi_rw define the io priority of this bio
- */
-#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
-#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
-#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
-
-#define bio_set_prio(bio, prio) do { \
- WARN_ON(prio >= (1 << IOPRIO_BITS)); \
- (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
- (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
-} while (0)
-
-/*
- * various member access, note that bio_data should of course not be used
- * on highmem page vectors
- */
-#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
-
-#define bvec_iter_page(bvec, iter) \
- (__bvec_iter_bvec((bvec), (iter))->bv_page)
-
-#define bvec_iter_len(bvec, iter) \
- min((iter).bi_size, \
- __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
-
-#define bvec_iter_offset(bvec, iter) \
- (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
-#define bvec_iter_bvec(bvec, iter) \
-((struct bio_vec) { \
- .bv_page = bvec_iter_page((bvec), (iter)), \
- .bv_len = bvec_iter_len((bvec), (iter)), \
- .bv_offset = bvec_iter_offset((bvec), (iter)), \
-})
+#define bio_prio(bio) (bio)->bi_ioprio
+#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
#define bio_iter_iovec(bio, iter) \
bvec_iter_bvec((bio)->bi_io_vec, (iter))
@@ -108,18 +71,26 @@ static inline bool bio_has_data(struct bio *bio)
{
if (bio &&
bio->bi_iter.bi_size &&
- !(bio->bi_rw & REQ_DISCARD))
+ bio_op(bio) != REQ_OP_DISCARD &&
+ bio_op(bio) != REQ_OP_SECURE_ERASE)
return true;
return false;
}
+static inline bool bio_no_advance_iter(struct bio *bio)
+{
+ return bio_op(bio) == REQ_OP_DISCARD ||
+ bio_op(bio) == REQ_OP_SECURE_ERASE ||
+ bio_op(bio) == REQ_OP_WRITE_SAME;
+}
+
static inline bool bio_is_rw(struct bio *bio)
{
if (!bio_has_data(bio))
return false;
- if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ if (bio_no_advance_iter(bio))
return false;
return true;
@@ -127,7 +98,7 @@ static inline bool bio_is_rw(struct bio *bio)
static inline bool bio_mergeable(struct bio *bio)
{
- if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+ if (bio->bi_opf & REQ_NOMERGE_FLAGS)
return false;
return true;
@@ -195,39 +166,12 @@ static inline void *bio_data(struct bio *bio)
#define bio_for_each_segment_all(bvl, bio, i) \
for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
-static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
- unsigned bytes)
-{
- WARN_ONCE(bytes > iter->bi_size,
- "Attempted to advance past end of bvec iter\n");
-
- while (bytes) {
- unsigned len = min(bytes, bvec_iter_len(bv, *iter));
-
- bytes -= len;
- iter->bi_size -= len;
- iter->bi_bvec_done += len;
-
- if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
- iter->bi_bvec_done = 0;
- iter->bi_idx++;
- }
- }
-}
-
-#define for_each_bvec(bvl, bio_vec, iter, start) \
- for (iter = (start); \
- (iter).bi_size && \
- ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
-
-
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
unsigned bytes)
{
iter->bi_sector += bytes >> 9;
- if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
@@ -255,10 +199,13 @@ static inline unsigned bio_segments(struct bio *bio)
* differently:
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ return 1;
+
+ if (bio_op(bio) == REQ_OP_SECURE_ERASE)
return 1;
- if (bio->bi_rw & REQ_WRITE_SAME)
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
return 1;
bio_for_each_segment(bv, bio, iter)
@@ -377,7 +324,7 @@ struct bio_integrity_payload {
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
- if (bio->bi_rw & REQ_INTEGRITY)
+ if (bio->bi_opf & REQ_INTEGRITY)
return bio->bi_integrity;
return NULL;
@@ -475,7 +422,7 @@ static inline void bio_io_error(struct bio *bio)
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int submit_bio_wait(int rw, struct bio *bio);
+extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
extern void bio_init(struct bio *);
@@ -725,8 +672,6 @@ static inline void bio_inc_remaining(struct bio *bio)
* and the bvec_slabs[].
*/
#define BIO_POOL_SIZE 2
-#define BIOVEC_NR_POOLS 6
-#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
struct bio_set {
struct kmem_cache *bio_slab;
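A sketch of the reworked submission path under the new accessors, assuming bdev, sector and page are in scope; the op/flag pair is packed by bio_set_op_attrs() and submit_bio_wait() no longer takes a separate rw argument:

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
	ret = submit_bio_wait(bio);	/* op and flags travel in bi_opf */
	bio_put(bio);
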
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e9b0b9ab0..598bc999f 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -266,9 +266,12 @@ static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
- return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_equal(src1, src2, nbits);
+ return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+#ifdef CONFIG_S390
+ if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
+ return !memcmp(src1, src2, nbits / 8);
+#endif
+ return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
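The new s390 branch only fires when nbits is a compile-time constant and a whole multiple of BITS_PER_LONG, e.g.:

	DECLARE_BITMAP(a, 256);
	DECLARE_BITMAP(b, 256);

	/* 256 is constant and 256 % BITS_PER_LONG == 0, so on s390 this
	 * may compile down to a single 32-byte memcmp(). */
	if (bitmap_equal(a, b, 256))
		pr_debug("bitmaps match\n");
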
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index c02e66994..10648e300 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
/**
* blkg_rwstat_add - add a value to a blkg_rwstat
* @rwstat: target blkg_rwstat
- * @rw: mask of REQ_{WRITE|SYNC}
+ * @op: REQ_OP
+ * @op_flags: rq_flag_bits
* @val: value to add
*
 * Add @val to @rwstat. The counters are chosen according to @op and @op_flags. The
* caller is responsible for synchronizing calls to this function.
*/
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- int rw, uint64_t val)
+ int op, int op_flags, uint64_t val)
{
struct percpu_counter *cnt;
- if (rw & REQ_WRITE)
+ if (op_is_write(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
- if (rw & REQ_SYNC)
+ if (op_flags & REQ_SYNC)
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+ blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+ blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
}
rcu_read_unlock();
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2498fdf3a..e43bbffb5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -96,6 +96,7 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
unsigned int);
+typedef int (reinit_request_fn)(void *, struct request *);
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
@@ -145,6 +146,7 @@ struct blk_mq_ops {
*/
init_request_fn *init_request;
exit_request_fn *exit_request;
+ reinit_request_fn *reinit_request;
};
enum {
@@ -196,6 +198,8 @@ enum {
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
unsigned int flags);
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
+ unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
@@ -243,6 +247,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
+int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
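A hedged sketch, in the spirit of a fabrics-style driver, of the new reinit hook; everything named example_* (and ctrl) is hypothetical:

	static int example_reinit_request(void *data, struct request *rq)
	{
		/* refresh per-request driver state, e.g. re-map DMA
		 * buffers against the re-established connection */
		return 0;
	}

	static struct blk_mq_ops example_mq_ops = {
		.queue_rq	= example_queue_rq,	/* hypothetical */
		.reinit_request	= example_reinit_request,
	};

	/* after the transport reconnects, walk every allocated request: */
	err = blk_mq_reinit_tagset(&ctrl->tag_set);
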
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cddd180d4..95fbfa1fe 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -6,6 +6,7 @@
#define __LINUX_BLK_TYPES_H
#include <linux/types.h>
+#include <linux/bvec.h>
struct bio_set;
struct bio;
@@ -17,28 +18,7 @@ struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
- */
-struct bio_vec {
- struct page *bv_page;
- unsigned int bv_len;
- unsigned int bv_offset;
-};
-
#ifdef CONFIG_BLOCK
-
-struct bvec_iter {
- sector_t bi_sector; /* device address in 512 byte
- sectors */
- unsigned int bi_size; /* residual I/O count */
-
- unsigned int bi_idx; /* current index into bvl_vec */
-
- unsigned int bi_bvec_done; /* number of bytes completed in
- current bvec */
-};
-
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -46,11 +26,13 @@ struct bvec_iter {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned int bi_flags; /* status, command, etc */
int bi_error;
- unsigned long bi_rw; /* bottom bits READ/WRITE,
- * top bits priority
+ unsigned int bi_opf; /* bottom bits req flags,
+ * top bits REQ_OP. Use
+ * accessors.
*/
+ unsigned short bi_flags; /* status, command, etc */
+ unsigned short bi_ioprio;
struct bvec_iter bi_iter;
@@ -107,6 +89,16 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
+#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS)
+#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
+
+#define bio_set_op_attrs(bio, op, op_flags) do { \
+ WARN_ON(op >= (1 << REQ_OP_BITS)); \
+ (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \
+ (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
+ (bio)->bi_opf |= op_flags; \
+} while (0)
+
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
/*
@@ -120,33 +112,37 @@ struct bio {
#define BIO_QUIET 6 /* Make BIO Quiet */
#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
-#define BIO_TOI 9 /* bio is TuxOnIce submitted */
/*
* Flags starting here get preserved by bio_reset() - this includes
- * BIO_POOL_IDX()
+ * BVEC_POOL_IDX()
*/
-#define BIO_RESET_BITS 14
-#define BIO_OWNS_VEC 14 /* bio_free() should free bvec */
+#define BIO_RESET_BITS 10
/*
- * top 4 bits of bio flags indicate the pool this bio came from
+ * We support 6 different bvec pools; the last one is magic in that it
+ * is backed by a mempool.
*/
-#define BIO_POOL_BITS (4)
-#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
-#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
-#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
+#define BVEC_POOL_NR 6
+#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
+
+/*
+ * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
+ * 1 to the actual index so that 0 indicates that there are no bvecs to be
+ * freed.
+ */
+#define BVEC_POOL_BITS (4)
+#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
+#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
#endif /* CONFIG_BLOCK */
/*
* Request flags. For use in the cmd_flags field of struct request, and in
- * bi_rw of struct bio. Note that some flags are only valid in either one.
+ * bi_opf of struct bio. Note that some flags are only valid in either one.
*/
enum rq_flag_bits {
/* common flags */
- __REQ_WRITE, /* not set, read. set, write */
__REQ_FAILFAST_DEV, /* no driver retries of device errors */
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
@@ -154,14 +150,11 @@ enum rq_flag_bits {
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
__REQ_PRIO, /* boost priority in cfq */
- __REQ_DISCARD, /* request to discard sectors */
- __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
- __REQ_WRITE_SAME, /* write same block many times */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
- __REQ_FLUSH, /* request for cache flush */
+ __REQ_PREFLUSH, /* request for cache flush */
__REQ_BG, /* background activity */
/* bio only flags */
@@ -193,31 +186,25 @@ enum rq_flag_bits {
__REQ_NR_BITS, /* stops here */
};
-#define REQ_WRITE (1ULL << __REQ_WRITE)
#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC (1ULL << __REQ_SYNC)
#define REQ_META (1ULL << __REQ_META)
#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_DISCARD (1ULL << __REQ_DISCARD)
-#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
- (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
- REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
- REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE | REQ_BG)
+ (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
+ REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_BG)
#define REQ_CLONE_MASK REQ_COMMON_MASK
-#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
-
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
@@ -235,16 +222,26 @@ enum rq_flag_bits {
#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
-#define REQ_FLUSH (1ULL << __REQ_FLUSH)
+#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
#define REQ_BG (1ULL << __REQ_BG)
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
-#define REQ_SECURE (1ULL << __REQ_SECURE)
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+enum req_op {
+ REQ_OP_READ,
+ REQ_OP_WRITE,
+ REQ_OP_DISCARD, /* request to discard sectors */
+ REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
+ REQ_OP_WRITE_SAME, /* write same block many times */
+ REQ_OP_FLUSH, /* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
@@ -269,11 +266,15 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
+#define BLK_RQ_STAT_BATCH 64
+
struct blk_rq_stat {
s64 mean;
u64 min;
u64 max;
- s64 nr_samples;
+ s32 nr_samples;
+ s32 nr_batch;
+ u64 batch;
s64 time;
};
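A worked example of the packing that bio_set_op_attrs() performs, assuming a bio in scope: with REQ_OP_BITS == 3 and a 32-bit bi_opf, BIO_OP_SHIFT is 29, so the op occupies the top three bits while the rq_flag_bits stay below:

	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);

	WARN_ON(bio_op(bio) != REQ_OP_WRITE);	/* read back from the top bits */
	WARN_ON(!(bio->bi_opf & REQ_FUA));	/* flags live in the low bits */
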
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7199146ae..2624a0220 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -53,7 +53,6 @@ struct rq_wb;
*/
#define BLKCG_MAX_POLS 3
-struct request;
typedef void (rq_end_io_fn)(struct request *, int);
#define BLK_RL_SYNCFULL (1U << 0)
@@ -96,18 +95,17 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- unsigned long fifo_time;
+ u64 fifo_time;
};
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
- u64 cmd_flags;
+ int cpu;
unsigned cmd_type;
+ u64 cmd_flags;
unsigned long atomic_flags;
- int cpu;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
sector_t __sector; /* sector cursor */
@@ -207,6 +205,20 @@ struct request {
struct request *next_rq;
};
+#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
+#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT)
+
+#define req_set_op(req, op) do { \
+ WARN_ON(op >= (1 << REQ_OP_BITS)); \
+ (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \
+ (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \
+} while (0)
+
+#define req_set_op_attrs(req, op, flags) do { \
+ req_set_op(req, op); \
+ (req)->cmd_flags |= flags; \
+} while (0)
+
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -497,7 +509,7 @@ struct request_queue {
#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
@@ -506,6 +518,7 @@ struct request_queue {
#define QUEUE_FLAG_WC 23 /* Write back caching */
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
+#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -593,8 +606,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
- test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_secure_erase(q) \
+ (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
+#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -611,7 +625,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
/*
* Driver can handle struct request, if it either has an old style
@@ -630,14 +644,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
/*
* We regard a request as sync, if either a read or a sync write
*/
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
- return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+ return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}
static inline bool rq_is_sync(struct request *rq)
{
- return rw_is_sync(rq->cmd_flags);
+ return rw_is_sync(req_op(rq), rq->cmd_flags);
}
static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -666,22 +680,10 @@ static inline bool rq_mergeable(struct request *rq)
if (rq->cmd_type != REQ_TYPE_FS)
return false;
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ if (req_op(rq) == REQ_OP_FLUSH)
return false;
- return true;
-}
-
-static inline bool blk_check_merge_flags(unsigned int flags1,
- unsigned int flags2)
-{
- if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
- return false;
-
- if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
- return false;
-
- if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+ if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
return false;
return true;
@@ -808,8 +810,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern struct request *blk_make_request(struct request_queue *, struct bio *,
- gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
@@ -822,6 +822,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
+extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
struct bio_set *);
@@ -901,12 +902,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
}
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- unsigned int cmd_flags)
+ int op)
{
- if (unlikely(cmd_flags & REQ_DISCARD))
+ if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- if (unlikely(cmd_flags & REQ_WRITE_SAME))
+ if (unlikely(op == REQ_OP_WRITE_SAME))
return q->limits.max_write_same_sectors;
return q->limits.max_sectors;
@@ -926,18 +927,21 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
(offset & (q->limits.chunk_sectors - 1));
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+ sector_t offset)
{
struct request_queue *q = rq->q;
if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
- return blk_queue_get_max_sectors(q, rq->cmd_flags);
+ if (!q->limits.chunk_sectors ||
+ req_op(rq) == REQ_OP_DISCARD ||
+ req_op(rq) == REQ_OP_SECURE_ERASE)
+ return blk_queue_get_max_sectors(q, req_op(rq));
- return min(blk_max_size_offset(q, blk_rq_pos(rq)),
- blk_queue_get_max_sectors(q, rq->cmd_flags));
+ return min(blk_max_size_offset(q, offset),
+ blk_queue_get_max_sectors(q, req_op(rq)));
}
static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -1158,13 +1162,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
-#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
+
+#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
+#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
+ sector_t nr_sects, gfp_t gfp_mask, int flags,
+ struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -1682,7 +1689,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
*/
struct blk_dax_ctl {
sector_t sector;
- void __pmem *addr;
+ void *addr;
long size;
pfn_t pfn;
};
@@ -1690,11 +1697,11 @@ struct blk_dax_ctl {
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t, void __pmem **,
- pfn_t *, long);
+ long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
+ long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
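The request side mirrors the bio accessors; a short sketch assuming rq is in scope:

	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);

	WARN_ON(req_op(rq) != REQ_OP_WRITE);
	WARN_ON(rq_data_dir(rq) != WRITE);	/* now derived via op_is_write() */
	WARN_ON(!rq_is_sync(rq));		/* rw_is_sync() takes (op, flags) */
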
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 0f3172b8b..cceb72f9e 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
}
extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0de4de6dd..111342384 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -11,14 +11,17 @@
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
+#include <linux/err.h>
+struct perf_event;
struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
- void (*map_free)(struct bpf_map *);
+ void (*map_release)(struct bpf_map *map, struct file *map_file);
+ void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
/* funcs callable from userspace and from eBPF programs */
@@ -27,8 +30,9 @@ struct bpf_map_ops {
int (*map_delete_elem)(struct bpf_map *map, void *key);
/* funcs called by prog_array and perf_event_array map */
- void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
- void (*map_fd_put_ptr) (void *ptr);
+ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+ int fd);
+ void (*map_fd_put_ptr)(void *ptr);
};
struct bpf_map {
@@ -189,15 +193,28 @@ struct bpf_array {
void __percpu *pptrs[0] __aligned(8);
};
};
+
#define MAX_TAIL_CALL_CNT 32
+struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+ struct file *map_file;
+ struct rcu_head rcu;
+};
+
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
-const struct bpf_func_proto *bpf_get_event_output_proto(void);
+
+typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
+ unsigned long off, unsigned long len);
+
+u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
@@ -206,9 +223,10 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
+struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
@@ -231,8 +249,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
+
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
* Best-effort only. No barriers here, since it _will_ race with concurrent
@@ -261,11 +284,17 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void bpf_prog_put(struct bpf_prog *prog)
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
+static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
#endif /* CONFIG_BPF_SYSCALL */
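A hedged sketch of the type-checked program lookup; the program type is illustrative and the attach step is elided:

	struct bpf_prog *prog;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	/* ... attach prog somewhere ... */
	bpf_prog_put(prog);
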
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d48daa3f6..ebbacd14d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, struct buffer_head * bh[]);
+void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int rw);
-void write_dirty_buffer(struct buffer_head *bh, int rw);
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
-int submit_bh(int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
+void write_dirty_buffer(struct buffer_head *bh, int op_flags);
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+ unsigned long bio_flags);
+int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
@@ -208,6 +209,9 @@ void block_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
+int __block_write_full_page(struct inode *inode, struct page *page,
+ get_block_t *get_block, struct writeback_control *wbc,
+ bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
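A sketch of a synchronous metadata read with the split arguments, assuming bh is a mapped, unlocked buffer_head; the old single-argument form is shown for contrast:

	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, REQ_META, bh);	/* was submit_bh(READ | REQ_META, bh) */
	wait_on_buffer(bh);
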
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
new file mode 100644
index 000000000..89b65b82d
--- /dev/null
+++ b/include/linux/bvec.h
@@ -0,0 +1,97 @@
+/*
+ * bvec iterator
+ *
+ * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __LINUX_BVEC_ITER_H
+#define __LINUX_BVEC_ITER_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+ struct page *bv_page;
+ unsigned int bv_len;
+ unsigned int bv_offset;
+};
+
+struct bvec_iter {
+ sector_t bi_sector; /* device address in 512 byte
+ sectors */
+ unsigned int bi_size; /* residual I/O count */
+
+ unsigned int bi_idx; /* current index into bvl_vec */
+
+ unsigned int bi_bvec_done; /* number of bytes completed in
+ current bvec */
+};
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter) \
+ min((iter).bi_size, \
+ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter) \
+((struct bio_vec) { \
+ .bv_page = bvec_iter_page((bvec), (iter)), \
+ .bv_len = bvec_iter_len((bvec), (iter)), \
+ .bv_offset = bvec_iter_offset((bvec), (iter)), \
+})
+
+static inline void bvec_iter_advance(const struct bio_vec *bv,
+ struct bvec_iter *iter,
+ unsigned bytes)
+{
+ WARN_ONCE(bytes > iter->bi_size,
+ "Attempted to advance past end of bvec iter\n");
+
+ while (bytes) {
+ unsigned iter_len = bvec_iter_len(bv, *iter);
+ unsigned len = min(bytes, iter_len);
+
+ bytes -= len;
+ iter->bi_size -= len;
+ iter->bi_bvec_done += len;
+
+ if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+ }
+ }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+#endif /* __LINUX_BVEC_ITER_H */
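A short sketch of the relocated iterator machinery; bvecs and start are assumed to be an array of bio_vecs and an iterator whose bi_size covers the region of interest:

	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	for_each_bvec(bv, bvecs, iter, start)
		bytes += bv.bv_len;	/* bv is a value, not a pointer */
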
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6..5f5270941 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
* CAN common private data
*/
struct can_priv {
+ struct net_device *dev;
struct can_device_stats can_stats;
struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
- struct timer_list restart_timer;
+ struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
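The restart path can now sleep; a sketch of arming it, assuming priv points at the can_priv above:

	schedule_delayed_work(&priv->restart_work,
			      msecs_to_jiffies(priv->restart_ms));
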
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 5f3c63dde..dbc21c719 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -38,6 +38,7 @@ struct cpu_vfs_cap_data {
struct file;
struct inode;
struct dentry;
+struct task_struct;
struct user_namespace;
extern const kernel_cap_t __cap_empty_set;
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
new file mode 100644
index 000000000..138bbf721
--- /dev/null
+++ b/include/linux/cec-funcs.h
@@ -0,0 +1,1971 @@
+/*
+ * cec - HDMI Consumer Electronics Control message functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Note: this framework is still in staging and it is likely the API
+ * will change before it goes out of staging.
+ *
+ * Once it is moved out of staging this header will move to uapi.
+ */
+#ifndef _CEC_UAPI_FUNCS_H
+#define _CEC_UAPI_FUNCS_H
+
+#include <linux/cec.h>
+
+/* One Touch Play Feature */
+static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_active_source(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_image_view_on(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON;
+}
+
+static inline void cec_msg_text_view_on(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TEXT_VIEW_ON;
+}
+
+
+/* Routing Control Feature */
+static inline void cec_msg_inactive_source(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_INACTIVE_SOURCE;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_inactive_source(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_request_active_source(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
+ msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
+}
+
+static inline void cec_msg_routing_information(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_routing_information(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_routing_change(struct cec_msg *msg,
+ bool reply,
+ __u16 orig_phys_addr,
+ __u16 new_phys_addr)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
+ msg->msg[2] = orig_phys_addr >> 8;
+ msg->msg[3] = orig_phys_addr & 0xff;
+ msg->msg[4] = new_phys_addr >> 8;
+ msg->msg[5] = new_phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0;
+}
+
+static inline void cec_ops_routing_change(const struct cec_msg *msg,
+ __u16 *orig_phys_addr,
+ __u16 *new_phys_addr)
+{
+ *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
+}
+
+static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Standby Feature */
+static inline void cec_msg_standby(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_STANDBY;
+}
+
+
+/* One Touch Record Feature */
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+struct cec_op_arib_data {
+ __u16 transport_id;
+ __u16 service_id;
+ __u16 orig_network_id;
+};
+
+struct cec_op_atsc_data {
+ __u16 transport_id;
+ __u16 program_number;
+};
+
+struct cec_op_dvb_data {
+ __u16 transport_id;
+ __u16 service_id;
+ __u16 orig_network_id;
+};
+
+struct cec_op_channel_data {
+ __u8 channel_number_fmt;
+ __u16 major;
+ __u16 minor;
+};
+
+struct cec_op_digital_service_id {
+ __u8 service_id_method;
+ __u8 dig_bcast_system;
+ union {
+ struct cec_op_arib_data arib;
+ struct cec_op_atsc_data atsc;
+ struct cec_op_dvb_data dvb;
+ struct cec_op_channel_data channel;
+ };
+};
+
+struct cec_op_record_src {
+ __u8 type;
+ union {
+ struct cec_op_digital_service_id digital;
+ struct {
+ __u8 ana_bcast_type;
+ __u16 ana_freq;
+ __u8 bcast_system;
+ } analog;
+ struct {
+ __u8 plug;
+ } ext_plug;
+ struct {
+ __u16 phys_addr;
+ } ext_phys_addr;
+ };
+};
+
+static inline void cec_set_digital_service_id(__u8 *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
+ if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+ *msg++ = (digital->channel.channel_number_fmt << 2) |
+ (digital->channel.major >> 8);
+ *msg++ = digital->channel.major & 0xff;
+ *msg++ = digital->channel.minor >> 8;
+ *msg++ = digital->channel.minor & 0xff;
+ *msg++ = 0;
+ *msg++ = 0;
+ return;
+ }
+ switch (digital->dig_bcast_system) {
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
+ case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
+ *msg++ = digital->atsc.transport_id >> 8;
+ *msg++ = digital->atsc.transport_id & 0xff;
+ *msg++ = digital->atsc.program_number >> 8;
+ *msg++ = digital->atsc.program_number & 0xff;
+ *msg++ = 0;
+ *msg++ = 0;
+ break;
+ default:
+ *msg++ = digital->dvb.transport_id >> 8;
+ *msg++ = digital->dvb.transport_id & 0xff;
+ *msg++ = digital->dvb.service_id >> 8;
+ *msg++ = digital->dvb.service_id & 0xff;
+ *msg++ = digital->dvb.orig_network_id >> 8;
+ *msg++ = digital->dvb.orig_network_id & 0xff;
+ break;
+ }
+}
+
+static inline void cec_get_digital_service_id(const __u8 *msg,
+ struct cec_op_digital_service_id *digital)
+{
+ digital->service_id_method = msg[0] >> 7;
+ digital->dig_bcast_system = msg[0] & 0x7f;
+ if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+ digital->channel.channel_number_fmt = msg[1] >> 2;
+ digital->channel.major = ((msg[1] & 3) << 6) | msg[2];
+ digital->channel.minor = (msg[3] << 8) | msg[4];
+ return;
+ }
+ digital->dvb.transport_id = (msg[1] << 8) | msg[2];
+ digital->dvb.service_id = (msg[3] << 8) | msg[4];
+ digital->dvb.orig_network_id = (msg[5] << 8) | msg[6];
+}
+
+static inline void cec_msg_record_on_own(struct cec_msg *msg)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_OWN;
+}
+
+static inline void cec_msg_record_on_digital(struct cec_msg *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 10;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL;
+ cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_record_on_analog(struct cec_msg *msg,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 7;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG;
+ msg->msg[3] = ana_bcast_type;
+ msg->msg[4] = ana_freq >> 8;
+ msg->msg[5] = ana_freq & 0xff;
+ msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_record_on_plug(struct cec_msg *msg,
+ __u8 plug)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG;
+ msg->msg[3] = plug;
+}
+
+static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
+ __u16 phys_addr)
+{
+ msg->len = 5;
+ msg->msg[1] = CEC_MSG_RECORD_ON;
+ msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR;
+ msg->msg[3] = phys_addr >> 8;
+ msg->msg[4] = phys_addr & 0xff;
+}
+
+static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
+ const struct cec_op_record_src *rec_src)
+{
+ switch (rec_src->type) {
+ case CEC_OP_RECORD_SRC_OWN:
+ cec_msg_record_on_own(msg);
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ cec_msg_record_on_digital(msg, &rec_src->digital);
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ cec_msg_record_on_analog(msg,
+ rec_src->analog.ana_bcast_type,
+ rec_src->analog.ana_freq,
+ rec_src->analog.bcast_system);
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ cec_msg_record_on_plug(msg, rec_src->ext_plug.plug);
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ cec_msg_record_on_phys_addr(msg,
+ rec_src->ext_phys_addr.phys_addr);
+ break;
+ }
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+static inline void cec_ops_record_on(const struct cec_msg *msg,
+ struct cec_op_record_src *rec_src)
+{
+ rec_src->type = msg->msg[2];
+ switch (rec_src->type) {
+ case CEC_OP_RECORD_SRC_OWN:
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ cec_get_digital_service_id(msg->msg + 3, &rec_src->digital);
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ rec_src->analog.ana_bcast_type = msg->msg[3];
+ rec_src->analog.ana_freq =
+ (msg->msg[4] << 8) | msg->msg[5];
+ rec_src->analog.bcast_system = msg->msg[6];
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ rec_src->ext_plug.plug = msg->msg[3];
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ rec_src->ext_phys_addr.phys_addr =
+ (msg->msg[3] << 8) | msg->msg[4];
+ break;
+ }
+}
+
+static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_RECORD_STATUS;
+ msg->msg[2] = rec_status;
+}
+
+static inline void cec_ops_record_status(const struct cec_msg *msg,
+ __u8 *rec_status)
+{
+ *rec_status = msg->msg[2];
+}
+
+static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
+ msg->reply = reply ? CEC_MSG_RECORD_ON : 0;
+}
+
+
+/* Timer Programming Feature */
+static inline void cec_msg_timer_status(struct cec_msg *msg,
+ __u8 timer_overlap_warning,
+ __u8 media_info,
+ __u8 prog_info,
+ __u8 prog_error,
+ __u8 duration_hr,
+ __u8 duration_min)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_TIMER_STATUS;
+ msg->msg[2] = (timer_overlap_warning << 7) |
+ (media_info << 5) |
+ (prog_info ? 0x10 : 0) |
+ (prog_info ? prog_info : prog_error);
+ if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+ prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+ prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+ msg->len += 2;
+ msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10);
+ }
+}
+
+static inline void cec_ops_timer_status(const struct cec_msg *msg,
+ __u8 *timer_overlap_warning,
+ __u8 *media_info,
+ __u8 *prog_info,
+ __u8 *prog_error,
+ __u8 *duration_hr,
+ __u8 *duration_min)
+{
+ *timer_overlap_warning = msg->msg[2] >> 7;
+ *media_info = (msg->msg[2] >> 5) & 3;
+ if (msg->msg[2] & 0x10) {
+ *prog_info = msg->msg[2] & 0xf;
+ *prog_error = 0;
+ } else {
+ *prog_info = 0;
+ *prog_error = msg->msg[2] & 0xf;
+ }
+ if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+ *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+ *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+ *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf);
+ *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ } else {
+ *duration_hr = *duration_min = 0;
+ }
+}
+
+static inline void cec_msg_timer_cleared_status(struct cec_msg *msg,
+ __u8 timer_cleared_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS;
+ msg->msg[2] = timer_cleared_status;
+}
+
+static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
+ __u8 *timer_cleared_status)
+{
+ *timer_cleared_status = msg->msg[2];
+}
+
+static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ana_bcast_type;
+ msg->msg[10] = ana_freq >> 8;
+ msg->msg[11] = ana_freq & 0xff;
+ msg->msg[12] = bcast_system;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ana_bcast_type = msg->msg[9];
+ *ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+ *bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 16;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+ msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ struct cec_op_digital_service_id *digital)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ext_src_spec,
+ __u8 plug,
+ __u16 phys_addr)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ext_src_spec;
+ msg->msg[10] = plug;
+ msg->msg[11] = phys_addr >> 8;
+ msg->msg[12] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ext_src_spec,
+ __u8 *plug,
+ __u16 *phys_addr)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ext_src_spec = msg->msg[9];
+ *plug = msg->msg[10];
+ *phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ana_bcast_type;
+ msg->msg[10] = ana_freq >> 8;
+ msg->msg[11] = ana_freq & 0xff;
+ msg->msg[12] = bcast_system;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ana_bcast_type = msg->msg[9];
+ *ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+ *bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 16;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+ msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ struct cec_op_digital_service_id *digital)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
+ bool reply,
+ __u8 day,
+ __u8 month,
+ __u8 start_hr,
+ __u8 start_min,
+ __u8 duration_hr,
+ __u8 duration_min,
+ __u8 recording_seq,
+ __u8 ext_src_spec,
+ __u8 plug,
+ __u16 phys_addr)
+{
+ msg->len = 13;
+ msg->msg[1] = CEC_MSG_SET_EXT_TIMER;
+ msg->msg[2] = day;
+ msg->msg[3] = month;
+ /* Hours and minutes are in BCD format */
+ msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+ msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+ msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+ msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+ msg->msg[8] = recording_seq;
+ msg->msg[9] = ext_src_spec;
+ msg->msg[10] = plug;
+ msg->msg[11] = phys_addr >> 8;
+ msg->msg[12] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_ext_timer(const struct cec_msg *msg,
+ __u8 *day,
+ __u8 *month,
+ __u8 *start_hr,
+ __u8 *start_min,
+ __u8 *duration_hr,
+ __u8 *duration_min,
+ __u8 *recording_seq,
+ __u8 *ext_src_spec,
+ __u8 *plug,
+ __u16 *phys_addr)
+{
+ *day = msg->msg[2];
+ *month = msg->msg[3];
+ /* Hours and minutes are in BCD format */
+ *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+ *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+ *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+ *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+ *recording_seq = msg->msg[8];
+ *ext_src_spec = msg->msg[9];
+ *plug = msg->msg[10];
+ *phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_timer_program_title(struct cec_msg *msg,
+ const char *prog_title)
+{
+ unsigned int len = strlen(prog_title);
+
+ if (len > 14)
+ len = 14;
+ msg->len = 2 + len;
+ msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE;
+ memcpy(msg->msg + 2, prog_title, len);
+}
+
+static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg,
+ char *prog_title)
+{
+ unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+ if (len > 14)
+ len = 14;
+ memcpy(prog_title, msg->msg + 2, len);
+ prog_title[len] = '\0';
+}
+
+/* System Information Feature */
+static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_CEC_VERSION;
+ msg->msg[2] = cec_version;
+}
+
+static inline void cec_ops_cec_version(const struct cec_msg *msg,
+ __u8 *cec_version)
+{
+ *cec_version = msg->msg[2];
+}
+
+static inline void cec_msg_get_cec_version(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
+ msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
+}
+
+static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
+ __u16 phys_addr, __u8 prim_devtype)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->msg[4] = prim_devtype;
+}
+
+static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
+ __u16 *phys_addr, __u8 *prim_devtype)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *prim_devtype = msg->msg[4];
+}
+
+static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
+ msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
+}
+
+static inline void cec_msg_set_menu_language(struct cec_msg *msg,
+ const char *language)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
+ memcpy(msg->msg + 2, language, 3);
+}
+
+static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
+ char *language)
+{
+ memcpy(language, msg->msg + 2, 3);
+ language[3] = '\0';
+}
+
+static inline void cec_msg_get_menu_language(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
+ msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
+}
+
+/*
+ * Assumes a single RC Profile byte and a single Device Features byte,
+ * i.e. no extended features are supported by this helper function.
+ *
+ * As of CEC 2.0 no extended features are defined. Should those be added
+ * in the future, then this function needs to be adapted or a new function
+ * should be added.
+ */
+static inline void cec_msg_report_features(struct cec_msg *msg,
+ __u8 cec_version, __u8 all_device_types,
+ __u8 rc_profile, __u8 dev_features)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = cec_version;
+ msg->msg[3] = all_device_types;
+ msg->msg[4] = rc_profile;
+ msg->msg[5] = dev_features;
+}
+
+static inline void cec_ops_report_features(const struct cec_msg *msg,
+ __u8 *cec_version, __u8 *all_device_types,
+ const __u8 **rc_profile, const __u8 **dev_features)
+{
+ const __u8 *p = &msg->msg[4];
+
+ *cec_version = msg->msg[2];
+ *all_device_types = msg->msg[3];
+ *rc_profile = p;
+ /* Walk over the RC Profile bytes; CEC_OP_FEAT_EXT marks a continuation */
+ while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
+ p++;
+ if (!(*p & CEC_OP_FEAT_EXT)) {
+ *dev_features = ++p;
+ /* Walk over the Device Features bytes as well */
+ while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
+ p++;
+ }
+ if (*p & CEC_OP_FEAT_EXT)
+ *rc_profile = *dev_features = NULL;
+}
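+
+/*
+ * Example (a sketch, not part of the API) of consuming the parsed
+ * operands; both pointers are set to NULL when the message carries more
+ * extension bytes than fit in a single CEC frame:
+ *
+ *	const __u8 *rc_profile, *dev_features;
+ *	__u8 cec_version, all_device_types;
+ *
+ *	cec_ops_report_features(&msg, &cec_version, &all_device_types,
+ *				&rc_profile, &dev_features);
+ *	if (dev_features &&
+ *	    (dev_features[0] & CEC_OP_FEAT_DEV_HAS_DECK_CONTROL))
+ *		... the device supports deck control ...
+ */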
+
+static inline void cec_msg_give_features(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_FEATURES;
+ msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0;
+}
+
+/* Deck Control Feature */
+static inline void cec_msg_deck_control(struct cec_msg *msg,
+ __u8 deck_control_mode)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_DECK_CONTROL;
+ msg->msg[2] = deck_control_mode;
+}
+
+static inline void cec_ops_deck_control(const struct cec_msg *msg,
+ __u8 *deck_control_mode)
+{
+ *deck_control_mode = msg->msg[2];
+}
+
+static inline void cec_msg_deck_status(struct cec_msg *msg,
+ __u8 deck_info)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_DECK_STATUS;
+ msg->msg[2] = deck_info;
+}
+
+static inline void cec_ops_deck_status(const struct cec_msg *msg,
+ __u8 *deck_info)
+{
+ *deck_info = msg->msg[2];
+}
+
+static inline void cec_msg_give_deck_status(struct cec_msg *msg,
+ bool reply,
+ __u8 status_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
+ msg->msg[2] = status_req;
+ msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+}
+
+static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
+ __u8 *status_req)
+{
+ *status_req = msg->msg[2];
+}
+
+static inline void cec_msg_play(struct cec_msg *msg,
+ __u8 play_mode)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_PLAY;
+ msg->msg[2] = play_mode;
+}
+
+static inline void cec_ops_play(const struct cec_msg *msg,
+ __u8 *play_mode)
+{
+ *play_mode = msg->msg[2];
+}
+
+
+/* Tuner Control Feature */
+struct cec_op_tuner_device_info {
+ __u8 rec_flag;
+ __u8 tuner_display_info;
+ bool is_analog;
+ union {
+ struct cec_op_digital_service_id digital;
+ struct {
+ __u8 ana_bcast_type;
+ __u16 ana_freq;
+ __u8 bcast_system;
+ } analog;
+ };
+};
+
+static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
+ __u8 rec_flag,
+ __u8 tuner_display_info,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 7;
+ msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+ msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+ msg->msg[3] = ana_bcast_type;
+ msg->msg[4] = ana_freq >> 8;
+ msg->msg[5] = ana_freq & 0xff;
+ msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
+ __u8 rec_flag, __u8 tuner_display_info,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 10;
+ msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+ msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+ cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
+ const struct cec_op_tuner_device_info *tuner_dev_info)
+{
+ if (tuner_dev_info->is_analog)
+ cec_msg_tuner_device_status_analog(msg,
+ tuner_dev_info->rec_flag,
+ tuner_dev_info->tuner_display_info,
+ tuner_dev_info->analog.ana_bcast_type,
+ tuner_dev_info->analog.ana_freq,
+ tuner_dev_info->analog.bcast_system);
+ else
+ cec_msg_tuner_device_status_digital(msg,
+ tuner_dev_info->rec_flag,
+ tuner_dev_info->tuner_display_info,
+ &tuner_dev_info->digital);
+}
+
+static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
+ struct cec_op_tuner_device_info *tuner_dev_info)
+{
+ tuner_dev_info->is_analog = msg->len < 10;
+ tuner_dev_info->rec_flag = msg->msg[2] >> 7;
+ tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
+ if (tuner_dev_info->is_analog) {
+ tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
+ tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
+ tuner_dev_info->analog.bcast_system = msg->msg[6];
+ return;
+ }
+ cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
+}
+
+static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
+ bool reply,
+ __u8 status_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
+ msg->msg[2] = status_req;
+ msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+}
+
+static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
+ __u8 *status_req)
+{
+ *status_req = msg->msg[2];
+}
+
+static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
+ __u8 ana_bcast_type,
+ __u16 ana_freq,
+ __u8 bcast_system)
+{
+ msg->len = 6;
+ msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
+ msg->msg[2] = ana_bcast_type;
+ msg->msg[3] = ana_freq >> 8;
+ msg->msg[4] = ana_freq & 0xff;
+ msg->msg[5] = bcast_system;
+}
+
+static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
+ __u8 *ana_bcast_type,
+ __u16 *ana_freq,
+ __u8 *bcast_system)
+{
+ *ana_bcast_type = msg->msg[2];
+ *ana_freq = (msg->msg[3] << 8) | msg->msg[4];
+ *bcast_system = msg->msg[5];
+}
+
+static inline void cec_msg_select_digital_service(struct cec_msg *msg,
+ const struct cec_op_digital_service_id *digital)
+{
+ msg->len = 9;
+ msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
+ cec_set_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
+ struct cec_op_digital_service_id *digital)
+{
+ cec_get_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
+}
+
+static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
+}
+
+
+/* Vendor Specific Commands Feature */
+static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+}
+
+static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
+ __u32 *vendor_id)
+{
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+}
+
+static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
+ msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
+}
+
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
+}
+
+
+/* OSD Display Feature */
+static inline void cec_msg_set_osd_string(struct cec_msg *msg,
+ __u8 disp_ctl,
+ const char *osd)
+{
+ unsigned int len = strlen(osd);
+
+ if (len > 13)
+ len = 13;
+ msg->len = 3 + len;
+ msg->msg[1] = CEC_MSG_SET_OSD_STRING;
+ msg->msg[2] = disp_ctl;
+ memcpy(msg->msg + 3, osd, len);
+}
+
+static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
+ __u8 *disp_ctl,
+ char *osd)
+{
+ unsigned int len = msg->len > 3 ? msg->len - 3 : 0;
+
+ *disp_ctl = msg->msg[2];
+ if (len > 13)
+ len = 13;
+ memcpy(osd, msg->msg + 3, len);
+ osd[len] = '\0';
+}
+
+
+/* Device OSD Transfer Feature */
+static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
+{
+ unsigned int len = strlen(name);
+
+ if (len > 14)
+ len = 14;
+ msg->len = 2 + len;
+ msg->msg[1] = CEC_MSG_SET_OSD_NAME;
+ memcpy(msg->msg + 2, name, len);
+}
+
+static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
+ char *name)
+{
+ unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+ if (len > 14)
+ len = 14;
+ memcpy(name, msg->msg + 2, len);
+ name[len] = '\0';
+}
+
+static inline void cec_msg_give_osd_name(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
+ msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+ __u8 menu_state)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_MENU_STATUS;
+ msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+ __u8 *menu_state)
+{
+ *menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+ bool reply,
+ __u8 menu_req)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_MENU_REQUEST;
+ msg->msg[2] = menu_req;
+ msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+ __u8 *menu_req)
+{
+ *menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+ __u8 ui_cmd;
+ bool has_opt_arg;
+ union {
+ struct cec_op_channel_data channel_identifier;
+ __u8 ui_broadcast_type;
+ __u8 ui_sound_presentation_control;
+ __u8 play_mode;
+ __u8 ui_function_media;
+ __u8 ui_function_select_av_input;
+ __u8 ui_function_select_audio_input;
+ };
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+ const struct cec_op_ui_command *ui_cmd)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+ msg->msg[2] = ui_cmd->ui_cmd;
+ if (!ui_cmd->has_opt_arg)
+ return;
+ switch (ui_cmd->ui_cmd) {
+ case 0x56:
+ case 0x57:
+ case 0x60:
+ case 0x68:
+ case 0x69:
+ case 0x6a:
+ /* The optional operand is one byte for all these ui commands */
+ msg->len++;
+ msg->msg[3] = ui_cmd->play_mode;
+ break;
+ case 0x67:
+ msg->len += 4;
+ msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+ (ui_cmd->channel_identifier.major >> 8);
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+ msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+ msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+ break;
+ }
+}
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+ struct cec_op_ui_command *ui_cmd)
+{
+ ui_cmd->ui_cmd = msg->msg[2];
+ ui_cmd->has_opt_arg = false;
+ if (msg->len == 3)
+ return;
+ switch (ui_cmd->ui_cmd) {
+ case 0x56:
+ case 0x57:
+ case 0x60:
+ case 0x68:
+ case 0x69:
+ case 0x6a:
+ /* The optional operand is one byte for all these ui commands */
+ ui_cmd->play_mode = msg->msg[3];
+ ui_cmd->has_opt_arg = true;
+ break;
+ case 0x67:
+ if (msg->len < 7)
+ break;
+ ui_cmd->has_opt_arg = true;
+ ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+ ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 6) | msg->msg[4];
+ ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+ break;
+ }
+}
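+
+/*
+ * Sketch (illustrative only): sending <User Control Pressed> with the
+ * one-byte Play Mode operand (UI command 0x60, the Play Function):
+ *
+ *	struct cec_op_ui_command ui_cmd = {
+ *		.ui_cmd = 0x60,
+ *		.has_opt_arg = true,
+ *		.play_mode = CEC_OP_PLAY_MODE_PLAY_FWD,
+ *	};
+ *
+ *	cec_msg_user_control_pressed(&msg, &ui_cmd);
+ */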
+
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+ __u8 pwr_state)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+ msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+ __u8 *pwr_state)
+{
+ *pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
+ msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
+}
+
+/* General Protocol Messages */
+static inline void cec_msg_feature_abort(struct cec_msg *msg,
+ __u8 abort_msg, __u8 reason)
+{
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+ msg->msg[2] = abort_msg;
+ msg->msg[3] = reason;
+}
+
+static inline void cec_ops_feature_abort(const struct cec_msg *msg,
+ __u8 *abort_msg, __u8 *reason)
+{
+ *abort_msg = msg->msg[2];
+ *reason = msg->msg[3];
+}
+
+/* This changes the current message into a feature abort message */
+static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
+{
+ cec_msg_set_reply_to(msg, msg);
+ msg->len = 4;
+ msg->msg[2] = msg->msg[1];
+ msg->msg[3] = reason;
+ msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+}
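+
+/*
+ * Typical use (sketch, assuming the CEC_OP_ABORT_* reason defines from
+ * cec.h): a follower that cannot handle a received opcode turns the
+ * message into its own reply in place and transmits it:
+ *
+ *	cec_msg_reply_feature_abort(&msg, CEC_OP_ABORT_UNRECOGNIZED_OP);
+ *	... transmit msg ...
+ */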
+
+static inline void cec_msg_abort(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_ABORT;
+}
+
+
+/* System Audio Control Feature */
+static inline void cec_msg_report_audio_status(struct cec_msg *msg,
+ __u8 aud_mute_status,
+ __u8 aud_vol_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
+ msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
+}
+
+static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
+ __u8 *aud_mute_status,
+ __u8 *aud_vol_status)
+{
+ *aud_mute_status = msg->msg[2] >> 7;
+ *aud_vol_status = msg->msg[2] & 0x7f;
+}
+
+static inline void cec_msg_give_audio_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
+ msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
+}
+
+static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
+ __u8 sys_aud_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
+ msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
+ __u8 *sys_aud_status)
+{
+ *sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
+ bool reply,
+ __u16 phys_addr)
+{
+ msg->len = phys_addr == 0xffff ? 2 : 4;
+ msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
+}
+
+static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ if (msg->len < 4)
+ *phys_addr = 0xffff;
+ else
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
+ __u8 sys_aud_status)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
+ msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
+ __u8 *sys_aud_status)
+{
+ *sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
+ msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
+}
+
+static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
+ __u8 num_descriptors,
+ const __u32 *descriptors)
+{
+ unsigned int i;
+
+ if (num_descriptors > 4)
+ num_descriptors = 4;
+ msg->len = 2 + num_descriptors * 3;
+ msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
+ for (i = 0; i < num_descriptors; i++) {
+ msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
+ msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
+ msg->msg[4 + i * 3] = descriptors[i] & 0xff;
+ }
+}
+
+static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
+ __u8 *num_descriptors,
+ __u32 *descriptors)
+{
+ unsigned int i;
+
+ *num_descriptors = (msg->len - 2) / 3;
+ if (*num_descriptors > 4)
+ *num_descriptors = 4;
+ for (i = 0; i < *num_descriptors; i++)
+ descriptors[i] = (msg->msg[2 + i * 3] << 16) |
+ (msg->msg[3 + i * 3] << 8) |
+ msg->msg[4 + i * 3];
+}
+
+static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
+ bool reply,
+ __u8 num_descriptors,
+ const __u8 *audio_format_id,
+ const __u8 *audio_format_code)
+{
+ unsigned int i;
+
+ if (num_descriptors > 4)
+ num_descriptors = 4;
+ msg->len = 2 + num_descriptors;
+ msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
+ msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
+ for (i = 0; i < num_descriptors; i++)
+ msg->msg[2 + i] = (audio_format_id[i] << 6) |
+ (audio_format_code[i] & 0x3f);
+}
+
+static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
+ __u8 *num_descriptors,
+ __u8 *audio_format_id,
+ __u8 *audio_format_code)
+{
+ unsigned int i;
+
+ *num_descriptors = msg->len - 2;
+ if (*num_descriptors > 4)
+ *num_descriptors = 4;
+ for (i = 0; i < *num_descriptors; i++) {
+ audio_format_id[i] = msg->msg[2 + i] >> 6;
+ audio_format_code[i] = msg->msg[2 + i] & 0x3f;
+ }
+}
+
+
+/* Audio Rate Control Feature */
+static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
+ __u8 audio_rate)
+{
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
+ msg->msg[2] = audio_rate;
+}
+
+static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
+ __u8 *audio_rate)
+{
+ *audio_rate = msg->msg[2];
+}
+
+
+/* Audio Return Channel Control Feature */
+static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
+}
+
+static inline void cec_msg_initiate_arc(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_INITIATE_ARC;
+ msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
+}
+
+static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
+ msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
+}
+
+static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
+}
+
+static inline void cec_msg_terminate_arc(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_TERMINATE_ARC;
+ msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
+}
+
+static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
+ bool reply)
+{
+ msg->len = 2;
+ msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
+ msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
+}
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+static inline void cec_msg_report_current_latency(struct cec_msg *msg,
+ __u16 phys_addr,
+ __u8 video_latency,
+ __u8 low_latency_mode,
+ __u8 audio_out_compensated,
+ __u8 audio_out_delay)
+{
+ msg->len = 7;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->msg[4] = video_latency;
+ msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
+ msg->msg[6] = audio_out_delay;
+}
+
+static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *video_latency,
+ __u8 *low_latency_mode,
+ __u8 *audio_out_compensated,
+ __u8 *audio_out_delay)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *video_latency = msg->msg[4];
+ *low_latency_mode = (msg->msg[5] >> 2) & 1;
+ *audio_out_compensated = msg->msg[5] & 3;
+ *audio_out_delay = msg->msg[6];
+}
+
+static inline void cec_msg_request_current_latency(struct cec_msg *msg,
+ bool reply,
+ __u16 phys_addr)
+{
+ msg->len = 4;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
+ msg->msg[2] = phys_addr >> 8;
+ msg->msg[3] = phys_addr & 0xff;
+ msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0;
+}
+
+static inline void cec_ops_request_current_latency(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Capability Discovery and Control Feature */
+static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2)
+{
+ msg->len = 9;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+}
+
+static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
+ __u16 target_phys_addr,
+ __u8 hec_func_state,
+ __u8 host_func_state,
+ __u8 enc_func_state,
+ __u8 cdc_errcode,
+ __u8 has_field,
+ __u16 hec_field)
+{
+ msg->len = has_field ? 10 : 8;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
+ msg->msg[5] = target_phys_addr >> 8;
+ msg->msg[6] = target_phys_addr & 0xff;
+ msg->msg[7] = (hec_func_state << 6) |
+ (host_func_state << 4) |
+ (enc_func_state << 2) |
+ cdc_errcode;
+ if (has_field) {
+ msg->msg[8] = hec_field >> 8;
+ msg->msg[9] = hec_field & 0xff;
+ }
+}
+
+static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *target_phys_addr,
+ __u8 *hec_func_state,
+ __u8 *host_func_state,
+ __u8 *enc_func_state,
+ __u8 *cdc_errcode,
+ __u8 *has_field,
+ __u16 *hec_field)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
+ *hec_func_state = msg->msg[7] >> 6;
+ *host_func_state = (msg->msg[7] >> 4) & 3;
+ *enc_func_state = (msg->msg[7] >> 2) & 3;
+ *cdc_errcode = msg->msg[7] & 3;
+ *has_field = msg->len >= 10;
+ *hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
+}
+
+static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2,
+ __u8 hec_set_state,
+ __u16 phys_addr3,
+ __u16 phys_addr4,
+ __u16 phys_addr5)
+{
+ msg->len = 10;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+ msg->msg[9] = hec_set_state;
+ if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr3 >> 8;
+ msg->msg[msg->len++] = phys_addr3 & 0xff;
+ if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr4 >> 8;
+ msg->msg[msg->len++] = phys_addr4 & 0xff;
+ if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
+ msg->msg[msg->len++] = phys_addr5 >> 8;
+ msg->msg[msg->len++] = phys_addr5 & 0xff;
+ }
+ }
+ }
+}
+
+static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2,
+ __u8 *hec_set_state,
+ __u16 *phys_addr3,
+ __u16 *phys_addr4,
+ __u16 *phys_addr5)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+ *hec_set_state = msg->msg[9];
+ *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
+ if (msg->len >= 12)
+ *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
+ if (msg->len >= 14)
+ *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
+ if (msg->len >= 16)
+ *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
+}
+
+static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u8 hec_set_state)
+{
+ msg->len = 8;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = hec_set_state;
+}
+
+static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u8 *hec_set_state)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *hec_set_state = msg->msg[7];
+}
+
+static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
+ __u16 phys_addr1,
+ __u16 phys_addr2,
+ __u16 phys_addr3)
+{
+ msg->len = 11;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
+ msg->msg[5] = phys_addr1 >> 8;
+ msg->msg[6] = phys_addr1 & 0xff;
+ msg->msg[7] = phys_addr2 >> 8;
+ msg->msg[8] = phys_addr2 & 0xff;
+ msg->msg[9] = phys_addr3 >> 8;
+ msg->msg[10] = phys_addr3 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u16 *phys_addr1,
+ __u16 *phys_addr2,
+ __u16 *phys_addr3)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+ *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+ *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10];
+}
+
+static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
+}
+
+static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
+{
+ msg->len = 5;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
+}
+
+static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg,
+ __u16 *phys_addr)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
+ __u8 input_port,
+ __u8 hpd_state)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
+ msg->msg[5] = (input_port << 4) | hpd_state;
+}
+
+static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *input_port,
+ __u8 *hpd_state)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *input_port = msg->msg[5] >> 4;
+ *hpd_state = msg->msg[5] & 0xf;
+}
+
+static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
+ __u8 hpd_state,
+ __u8 hpd_error)
+{
+ msg->len = 6;
+ msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+ /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+ msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
+ msg->msg[5] = (hpd_state << 4) | hpd_error;
+}
+
+static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg,
+ __u16 *phys_addr,
+ __u8 *hpd_state,
+ __u8 *hpd_error)
+{
+ *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+ *hpd_state = msg->msg[5] >> 4;
+ *hpd_error = msg->msg[5] & 0xf;
+}
+
+#endif
diff --git a/include/linux/cec.h b/include/linux/cec.h
new file mode 100644
index 000000000..851968e80
--- /dev/null
+++ b/include/linux/cec.h
@@ -0,0 +1,1014 @@
+/*
+ * cec - HDMI Consumer Electronics Control public header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Note: this framework is still in staging and it is likely the API
+ * will change before it goes out of staging.
+ *
+ * Once it is moved out of staging this header will move to uapi.
+ */
+#ifndef _CEC_UAPI_H
+#define _CEC_UAPI_H
+
+#include <linux/types.h>
+
+#define CEC_MAX_MSG_SIZE 16
+
+/**
+ * struct cec_msg - CEC message structure.
+ * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ * driver when the message transmission has finished.
+ * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ * driver when the message was received.
+ * @len: Length in bytes of the message.
+ * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE.
+ * Set to 0 if you want to wait forever. This timeout can also be
+ * used with CEC_TRANSMIT as the timeout for waiting for a reply.
+ * If 0, then it will use a 1 second timeout instead of waiting
+ * forever as is done with CEC_RECEIVE.
+ * @sequence: The framework assigns a sequence number to messages that are
+ * sent. This can be used to track replies to previously sent
+ * messages.
+ * @flags: Set to 0.
+ * @msg: The message payload.
+ * @reply: This field is ignored with CEC_RECEIVE and is only used by
+ * CEC_TRANSMIT. If non-zero, then wait for a reply with this
+ * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for
+ * a possible ABORT reply. If there was an error when sending the
+ * msg or FeatureAbort was returned, then reply is set to 0.
+ * If reply is non-zero upon return, then len/msg are set to
+ * the received message.
+ * If reply is zero upon return and status has the
+ * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to
+ * the received feature abort message.
+ * If reply is zero upon return and status has the
+ * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at
+ * all. If reply is non-zero for CEC_TRANSMIT and the message is a
+ * broadcast, then -EINVAL is returned.
+ * If reply is non-zero, then timeout is set to 1000 (the required
+ * maximum response time).
+ * @rx_status: The message receive status bits. Set by the driver.
+ * @tx_status: The message transmit status bits. Set by the driver.
+ * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
+ * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
+ * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
+ * driver.
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver.
+ */
+struct cec_msg {
+ __u64 tx_ts;
+ __u64 rx_ts;
+ __u32 len;
+ __u32 timeout;
+ __u32 sequence;
+ __u32 flags;
+ __u8 msg[CEC_MAX_MSG_SIZE];
+ __u8 reply;
+ __u8 rx_status;
+ __u8 tx_status;
+ __u8 tx_arb_lost_cnt;
+ __u8 tx_nack_cnt;
+ __u8 tx_low_drive_cnt;
+ __u8 tx_error_cnt;
+};
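+
+/*
+ * A minimal transmit sequence (illustrative sketch, error handling
+ * omitted; fd is a hypothetical file descriptor of an opened /dev/cecX
+ * node):
+ *
+ *	struct cec_msg msg;
+ *
+ *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+ *	msg.len = 2;
+ *	msg.msg[1] = CEC_MSG_STANDBY;
+ *	ioctl(fd, CEC_TRANSMIT, &msg);
+ */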
+
+/**
+ * cec_msg_initiator - return the initiator's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
+{
+ return msg->msg[0] >> 4;
+}
+
+/**
+ * cec_msg_destination - return the destination's logical address.
+ * @msg: the message structure
+ */
+static inline __u8 cec_msg_destination(const struct cec_msg *msg)
+{
+ return msg->msg[0] & 0xf;
+}
+
+/**
+ * cec_msg_opcode - return the opcode of the message, -1 for poll
+ * @msg: the message structure
+ */
+static inline int cec_msg_opcode(const struct cec_msg *msg)
+{
+ return msg->len > 1 ? msg->msg[1] : -1;
+}
+
+/**
+ * cec_msg_is_broadcast - return true if this is a broadcast message.
+ * @msg: the message structure
+ */
+static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
+{
+ return (msg->msg[0] & 0xf) == 0xf;
+}
+
+/**
+ * cec_msg_init - initialize the message structure.
+ * @msg: the message structure
+ * @initiator: the logical address of the initiator
+ * @destination: the logical address of the destination (0xf for broadcast)
+ *
+ * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
+ * message) and the initiator and destination are filled in.
+ */
+static inline void cec_msg_init(struct cec_msg *msg,
+ __u8 initiator, __u8 destination)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->msg[0] = (initiator << 4) | destination;
+ msg->len = 1;
+}
+
+/**
+ * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
+ * @msg: the message structure for the reply
+ * @orig: the original message structure
+ *
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
+ */
+static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+ struct cec_msg *orig)
+{
+ /* The destination becomes the initiator and vice versa */
+ msg->msg[0] = (cec_msg_destination(orig) << 4) |
+ cec_msg_initiator(orig);
+ msg->reply = msg->timeout = 0;
+}
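+
+/*
+ * Sketch: answering <Give Device Power Status> by reusing the received
+ * message as the reply (initiator and destination are swapped in place;
+ * CEC_OP_POWER_STATUS_ON is defined with the Power Status Feature
+ * operands):
+ *
+ *	cec_msg_set_reply_to(&msg, &msg);
+ *	msg.len = 3;
+ *	msg.msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+ *	msg.msg[2] = CEC_OP_POWER_STATUS_ON;
+ */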
+
+/* cec status field */
+#define CEC_TX_STATUS_OK (1 << 0)
+#define CEC_TX_STATUS_ARB_LOST (1 << 1)
+#define CEC_TX_STATUS_NACK (1 << 2)
+#define CEC_TX_STATUS_LOW_DRIVE (1 << 3)
+#define CEC_TX_STATUS_ERROR (1 << 4)
+#define CEC_TX_STATUS_MAX_RETRIES (1 << 5)
+
+#define CEC_RX_STATUS_OK (1 << 0)
+#define CEC_RX_STATUS_TIMEOUT (1 << 1)
+#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2)
+
+static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
+{
+ if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
+ return false;
+ if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
+ return false;
+ if (!msg->tx_status && !msg->rx_status)
+ return false;
+ return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
+}
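+
+/*
+ * Example check after CEC_TRANSMIT (sketch): a message that was nacked,
+ * feature-aborted or timed out is not "ok":
+ *
+ *	if (!ioctl(fd, CEC_TRANSMIT, &msg) && !cec_msg_status_is_ok(&msg))
+ *		... inspect msg.tx_status and msg.rx_status ...
+ */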
+
+#define CEC_LOG_ADDR_INVALID 0xff
+#define CEC_PHYS_ADDR_INVALID 0xffff
+
+/*
+ * The maximum number of logical addresses one device can be assigned to.
+ * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
+ * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
+ */
+#define CEC_MAX_LOG_ADDRS 4
+
+/* The logical addresses defined by CEC 2.0 */
+#define CEC_LOG_ADDR_TV 0
+#define CEC_LOG_ADDR_RECORD_1 1
+#define CEC_LOG_ADDR_RECORD_2 2
+#define CEC_LOG_ADDR_TUNER_1 3
+#define CEC_LOG_ADDR_PLAYBACK_1 4
+#define CEC_LOG_ADDR_AUDIOSYSTEM 5
+#define CEC_LOG_ADDR_TUNER_2 6
+#define CEC_LOG_ADDR_TUNER_3 7
+#define CEC_LOG_ADDR_PLAYBACK_2 8
+#define CEC_LOG_ADDR_RECORD_3 9
+#define CEC_LOG_ADDR_TUNER_4 10
+#define CEC_LOG_ADDR_PLAYBACK_3 11
+#define CEC_LOG_ADDR_BACKUP_1 12
+#define CEC_LOG_ADDR_BACKUP_2 13
+#define CEC_LOG_ADDR_SPECIFIC 14
+#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */
+#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */
+
+/* The logical address types that the CEC device wants to claim */
+#define CEC_LOG_ADDR_TYPE_TV 0
+#define CEC_LOG_ADDR_TYPE_RECORD 1
+#define CEC_LOG_ADDR_TYPE_TUNER 2
+#define CEC_LOG_ADDR_TYPE_PLAYBACK 3
+#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4
+#define CEC_LOG_ADDR_TYPE_SPECIFIC 5
+#define CEC_LOG_ADDR_TYPE_UNREGISTERED 6
+/*
+ * Switches should use UNREGISTERED.
+ * Processors should use SPECIFIC.
+ */
+
+#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV)
+#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \
+ (1 << CEC_LOG_ADDR_RECORD_2) | \
+ (1 << CEC_LOG_ADDR_RECORD_3))
+#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \
+ (1 << CEC_LOG_ADDR_TUNER_2) | \
+ (1 << CEC_LOG_ADDR_TUNER_3) | \
+ (1 << CEC_LOG_ADDR_TUNER_4))
+#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \
+ (1 << CEC_LOG_ADDR_PLAYBACK_2) | \
+ (1 << CEC_LOG_ADDR_PLAYBACK_3))
+#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM)
+#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \
+ (1 << CEC_LOG_ADDR_BACKUP_2))
+#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC)
+#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED)
+
+static inline bool cec_has_tv(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
+}
+
+static inline bool cec_has_record(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
+}
+
+static inline bool cec_has_tuner(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
+}
+
+static inline bool cec_has_playback(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
+}
+
+static inline bool cec_has_audiosystem(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+}
+
+static inline bool cec_has_backup(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
+}
+
+static inline bool cec_has_specific(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
+}
+
+static inline bool cec_is_unregistered(__u16 log_addr_mask)
+{
+ return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
+}
+
+static inline bool cec_is_unconfigured(__u16 log_addr_mask)
+{
+ return log_addr_mask == 0;
+}
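+
+/*
+ * E.g. after CEC_ADAP_S_LOG_ADDRS (defined below) an application can
+ * test what was actually claimed (laddrs is a hypothetical struct
+ * cec_log_addrs filled in by the ioctl):
+ *
+ *	if (cec_is_unconfigured(laddrs.log_addr_mask))
+ *		... no logical address could be claimed ...
+ *	else if (cec_has_playback(laddrs.log_addr_mask))
+ *		... we claimed a playback logical address ...
+ */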
+
+/*
+ * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID
+ * should be disabled (CEC_S_VENDOR_ID)
+ */
+#define CEC_VENDOR_ID_NONE 0xffffffff
+
+/* The message handling modes */
+/* Modes for initiator */
+#define CEC_MODE_NO_INITIATOR (0x0 << 0)
+#define CEC_MODE_INITIATOR (0x1 << 0)
+#define CEC_MODE_EXCL_INITIATOR (0x2 << 0)
+#define CEC_MODE_INITIATOR_MSK 0x0f
+
+/* Modes for follower */
+#define CEC_MODE_NO_FOLLOWER (0x0 << 4)
+#define CEC_MODE_FOLLOWER (0x1 << 4)
+#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4)
+#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4)
+#define CEC_MODE_MONITOR (0xe << 4)
+#define CEC_MODE_MONITOR_ALL (0xf << 4)
+#define CEC_MODE_FOLLOWER_MSK 0xf0
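+
+/*
+ * One initiator and one follower mode are OR-ed together into a single
+ * __u32, e.g. (sketch, using the CEC_S_MODE ioctl defined further down):
+ *
+ *	__u32 mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+ *
+ *	ioctl(fd, CEC_S_MODE, &mode);
+ */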
+
+/* Userspace has to configure the physical address */
+#define CEC_CAP_PHYS_ADDR (1 << 0)
+/* Userspace has to configure the logical addresses */
+#define CEC_CAP_LOG_ADDRS (1 << 1)
+/* Userspace can transmit messages (and thus become follower as well) */
+#define CEC_CAP_TRANSMIT (1 << 2)
+/*
+ * Passthrough all messages instead of processing them.
+ */
+#define CEC_CAP_PASSTHROUGH (1 << 3)
+/* Supports remote control */
+#define CEC_CAP_RC (1 << 4)
+/* Hardware can monitor all messages, not just directed and broadcast. */
+#define CEC_CAP_MONITOR_ALL (1 << 5)
+
+/**
+ * struct cec_caps - CEC capabilities structure.
+ * @driver: name of the CEC device driver.
+ * @name: name of the CEC device. @driver + @name must be unique.
+ * @available_log_addrs: number of available logical addresses.
+ * @capabilities: capabilities of the CEC adapter.
+ * @version: version of the CEC adapter framework.
+ */
+struct cec_caps {
+ char driver[32];
+ char name[32];
+ __u32 available_log_addrs;
+ __u32 capabilities;
+ __u32 version;
+};
+
+/**
+ * struct cec_log_addrs - CEC logical addresses structure.
+ * @log_addr: the claimed logical addresses. Set by the driver.
+ * @log_addr_mask: current logical address mask. Set by the driver.
+ * @cec_version: the CEC version that the adapter should implement. Set by the
+ * caller.
+ * @num_log_addrs: how many logical addresses should be claimed. Set by the
+ * caller.
+ * @vendor_id: the vendor ID of the device. Set by the caller.
+ * @flags: flags (see the CEC_LOG_ADDRS_FL_* defines below). Set by the
+ * caller.
+ * @osd_name: the OSD name of the device. Set by the caller.
+ * @primary_device_type: the primary device type for each logical address.
+ * Set by the caller.
+ * @log_addr_type: the logical address types. Set by the caller.
+ * @all_device_types: CEC 2.0: all device types represented by the logical
+ * address. Set by the caller.
+ * @features: CEC 2.0: The logical address features. Set by the caller.
+ */
+struct cec_log_addrs {
+ __u8 log_addr[CEC_MAX_LOG_ADDRS];
+ __u16 log_addr_mask;
+ __u8 cec_version;
+ __u8 num_log_addrs;
+ __u32 vendor_id;
+ __u32 flags;
+ char osd_name[15];
+ __u8 primary_device_type[CEC_MAX_LOG_ADDRS];
+ __u8 log_addr_type[CEC_MAX_LOG_ADDRS];
+
+ /* CEC 2.0 */
+ __u8 all_device_types[CEC_MAX_LOG_ADDRS];
+ __u8 features[CEC_MAX_LOG_ADDRS][12];
+};
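+
+/*
+ * Configuration sketch (only the caller-set fields are shown; the
+ * driver fills in log_addr[] and log_addr_mask on success):
+ *
+ *	struct cec_log_addrs laddrs = {};
+ *
+ *	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ *	laddrs.num_log_addrs = 1;
+ *	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
+ *	strcpy(laddrs.osd_name, "example");
+ *	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
+ *	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ *	ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
+ */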
+
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+
+/* Events */
+
+/* Event that occurs when the adapter state changes */
+#define CEC_EVENT_STATE_CHANGE 1
+/*
+ * This event is sent when messages are lost because the application
+ * didn't empty the message queue in time
+ */
+#define CEC_EVENT_LOST_MSGS 2
+
+#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
+
+/**
+ * struct cec_event_state_change - used when the CEC adapter changes state.
+ * @phys_addr: the current physical address
+ * @log_addr_mask: the current logical address mask
+ */
+struct cec_event_state_change {
+ __u16 phys_addr;
+ __u16 log_addr_mask;
+};
+
+/**
+ * struct cec_event_lost_msgs - tells you how many messages were lost.
+ * @lost_msgs: how many messages were lost.
+ */
+struct cec_event_lost_msgs {
+ __u32 lost_msgs;
+};
+
+/**
+ * struct cec_event - CEC event structure
+ * @ts: the timestamp of when the event was sent.
+ * @event: the event.
+ * @flags: event flags (e.g. CEC_EVENT_FL_INITIAL_STATE).
+ * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
+ * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
+ * @raw: array to pad the union.
+ */
+struct cec_event {
+ __u64 ts;
+ __u32 event;
+ __u32 flags;
+ union {
+ struct cec_event_state_change state_change;
+ struct cec_event_lost_msgs lost_msgs;
+ __u32 raw[16];
+ };
+};
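+
+/*
+ * Dequeue sketch: CEC_DQEVENT (defined below) fills in one event at a
+ * time:
+ *
+ *	struct cec_event ev;
+ *
+ *	if (!ioctl(fd, CEC_DQEVENT, &ev) &&
+ *	    ev.event == CEC_EVENT_STATE_CHANGE)
+ *		... ev.state_change.phys_addr is the new physical address ...
+ */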
+
+/* ioctls */
+
+/* Adapter capabilities */
+#define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps)
+
+/*
+ * phys_addr is either 0 (if this is the CEC root device)
+ * or a valid physical address obtained from the sink's EDID
+ * as read by this CEC device (if this is a source device)
+ * or a physical address obtained and modified from a sink
+ * EDID and used for a sink CEC device.
+ * If nothing is connected, then phys_addr is 0xffff.
+ * See HDMI 1.4b, section 8.7 (Physical Address).
+ *
+ * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
+ * internally.
+ */
+#define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16)
+#define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16)
+
+/*
+ * Configure the CEC adapter. It sets the device type and which
+ * logical types it will try to claim. It will return which
+ * logical addresses it could actually claim.
+ * An error is returned if the adapter is disabled or if there
+ * is no physical address assigned.
+ */
+
+#define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs)
+#define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs)
+
+/* Transmit/receive a CEC command */
+#define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg)
+#define CEC_RECEIVE _IOWR('a', 6, struct cec_msg)
+
+/* Dequeue CEC events */
+#define CEC_DQEVENT _IOWR('a', 7, struct cec_event)
+
+/*
+ * Get and set the message handling mode for this filehandle.
+ */
+#define CEC_G_MODE _IOR('a', 8, __u32)
+#define CEC_S_MODE _IOW('a', 9, __u32)
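+
+/*
+ * Putting the ioctls together (sketch, error handling omitted): query
+ * the adapter capabilities before deciding what to configure:
+ *
+ *	struct cec_caps caps;
+ *	int fd = open("/dev/cec0", O_RDWR);
+ *
+ *	ioctl(fd, CEC_ADAP_G_CAPS, &caps);
+ *	if (caps.capabilities & CEC_CAP_LOG_ADDRS)
+ *		... this device needs CEC_ADAP_S_LOG_ADDRS from userspace ...
+ */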
+
+/*
+ * The remainder of this header defines all CEC messages and operands.
+ * The format matters since the cec-ctl utility parses it to generate
+ * code for implementing all these messages.
+ *
+ * Comments ending with 'Feature' group messages for each feature.
+ * If messages are part of multiple features, then the "Has also"
+ * comment is used to list the previously defined messages that are
+ * supported by the feature.
+ *
+ * Before operands are defined a comment is added that gives the
+ * name of the operand and in brackets the variable name of the
+ * corresponding argument in the cec-funcs.h function.
+ */
+
+/* Messages */
+
+/* One Touch Play Feature */
+#define CEC_MSG_ACTIVE_SOURCE 0x82
+#define CEC_MSG_IMAGE_VIEW_ON 0x04
+#define CEC_MSG_TEXT_VIEW_ON 0x0d
+
+
+/* Routing Control Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_ACTIVE_SOURCE
+ */
+
+#define CEC_MSG_INACTIVE_SOURCE 0x9d
+#define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85
+#define CEC_MSG_ROUTING_CHANGE 0x80
+#define CEC_MSG_ROUTING_INFORMATION 0x81
+#define CEC_MSG_SET_STREAM_PATH 0x86
+
+
+/* Standby Feature */
+#define CEC_MSG_STANDBY 0x36
+
+
+/* One Touch Record Feature */
+#define CEC_MSG_RECORD_OFF 0x0b
+#define CEC_MSG_RECORD_ON 0x09
+/* Record Source Type Operand (rec_src_type) */
+#define CEC_OP_RECORD_SRC_OWN 1
+#define CEC_OP_RECORD_SRC_DIGITAL 2
+#define CEC_OP_RECORD_SRC_ANALOG 3
+#define CEC_OP_RECORD_SRC_EXT_PLUG 4
+#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5
+/* Service Identification Method Operand (service_id_method) */
+#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0
+#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1
+/* Digital Service Broadcast System Operand (dig_bcast_system) */
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b
+/* Analogue Broadcast Type Operand (ana_bcast_type) */
+#define CEC_OP_ANA_BCAST_TYPE_CABLE 0
+#define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1
+#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2
+/* Broadcast System Operand (bcast_system) */
+#define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00
+#define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */
+#define CEC_OP_BCAST_SYSTEM_PAL_M 0x02
+#define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03
+#define CEC_OP_BCAST_SYSTEM_PAL_I 0x04
+#define CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05
+#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06
+#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07
+#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08
+#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f
+/* Channel Number Format Operand (channel_number_fmt) */
+#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01
+#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02
+
+#define CEC_MSG_RECORD_STATUS 0x0a
+/* Record Status Operand (rec_status) */
+#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01
+#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02
+#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03
+#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04
+#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05
+#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06
+#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a
+#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b
+#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c
+#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d
+#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e
+#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10
+#define CEC_OP_RECORD_STATUS_PLAYING 0x11
+#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12
+#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13
+#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14
+#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15
+#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16
+#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17
+#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a
+#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b
+#define CEC_OP_RECORD_STATUS_OTHER 0x1f
+
+#define CEC_MSG_RECORD_TV_SCREEN 0x0f
+
+
+/* Timer Programming Feature */
+#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33
+/* Recording Sequence Operand (recording_seq) */
+#define CEC_OP_REC_SEQ_SUNDAY 0x01
+#define CEC_OP_REC_SEQ_MONDAY 0x02
+#define CEC_OP_REC_SEQ_TUESDAY 0x04
+#define CEC_OP_REC_SEQ_WEDNESDAY 0x08
+#define CEC_OP_REC_SEQ_THURSDAY 0x10
+#define CEC_OP_REC_SEQ_FRIDAY 0x20
+#define CEC_OP_REC_SEQ_SATERDAY 0x40
+#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00
+
+#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99
+
+#define CEC_MSG_CLEAR_EXT_TIMER 0xa1
+/* External Source Specifier Operand (ext_src_spec) */
+#define CEC_OP_EXT_SRC_PLUG 0x04
+#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05
+
+#define CEC_MSG_SET_ANALOGUE_TIMER 0x34
+#define CEC_MSG_SET_DIGITAL_TIMER 0x97
+#define CEC_MSG_SET_EXT_TIMER 0xa2
+
+#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67
+#define CEC_MSG_TIMER_CLEARED_STATUS 0x43
+/* Timer Cleared Status Data Operand (timer_cleared_status) */
+#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00
+#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01
+#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02
+#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80
+
+#define CEC_MSG_TIMER_STATUS 0x35
+/* Timer Overlap Warning Operand (timer_overlap_warning) */
+#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0
+#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1
+/* Media Info Operand (media_info) */
+#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0
+#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1
+#define CEC_OP_MEDIA_INFO_NO_MEDIA 2
+/* Programmed Indicator Operand (prog_indicator) */
+#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0
+#define CEC_OP_PROG_IND_PROGRAMMED 1
+/* Programmed Info Operand (prog_info) */
+#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08
+#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09
+#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b
+#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a
+/* Not Programmed Error Info Operand (prog_error) */
+#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01
+#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02
+#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03
+#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04
+#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05
+#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06
+#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07
+#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08
+#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09
+#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a
+#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e
+
+
+/* System Information Feature */
+#define CEC_MSG_CEC_VERSION 0x9e
+/* CEC Version Operand (cec_version) */
+#define CEC_OP_CEC_VERSION_1_3A 4
+#define CEC_OP_CEC_VERSION_1_4 5
+#define CEC_OP_CEC_VERSION_2_0 6
+
+#define CEC_MSG_GET_CEC_VERSION 0x9f
+#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83
+#define CEC_MSG_GET_MENU_LANGUAGE 0x91
+#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84
+/* Primary Device Type Operand (prim_devtype) */
+#define CEC_OP_PRIM_DEVTYPE_TV 0
+#define CEC_OP_PRIM_DEVTYPE_RECORD 1
+#define CEC_OP_PRIM_DEVTYPE_TUNER 3
+#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4
+#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5
+#define CEC_OP_PRIM_DEVTYPE_SWITCH 6
+#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7
+
+#define CEC_MSG_SET_MENU_LANGUAGE 0x32
+#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */
+/* All Device Types Operand (all_device_types) */
+#define CEC_OP_ALL_DEVTYPE_TV 0x80
+#define CEC_OP_ALL_DEVTYPE_RECORD 0x40
+#define CEC_OP_ALL_DEVTYPE_TUNER 0x20
+#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10
+#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08
+#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04
+/*
+ * And if you are wondering what happened to PROCESSOR devices: those should
+ * be mapped to a SWITCH.
+ */
+
+/* Valid for RC Profile and Device Feature operands */
+#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */
+/* RC Profile Operand (rc_profile) */
+#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00
+#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02
+#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06
+#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a
+#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48
+#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41
+/* Device Feature Operand (dev_features) */
+#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40
+#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20
+#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08
+#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04
+#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02
+
+#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */
+
+
+/* Deck Control Feature */
+#define CEC_MSG_DECK_CONTROL 0x42
+/* Deck Control Mode Operand (deck_control_mode) */
+#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1
+#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2
+#define CEC_OP_DECK_CTL_MODE_STOP 3
+#define CEC_OP_DECK_CTL_MODE_EJECT 4
+
+#define CEC_MSG_DECK_STATUS 0x1b
+/* Deck Info Operand (deck_info) */
+#define CEC_OP_DECK_INFO_PLAY 0x11
+#define CEC_OP_DECK_INFO_RECORD 0x12
+#define CEC_OP_DECK_INFO_PLAY_REV 0x13
+#define CEC_OP_DECK_INFO_STILL 0x14
+#define CEC_OP_DECK_INFO_SLOW 0x15
+#define CEC_OP_DECK_INFO_SLOW_REV 0x16
+#define CEC_OP_DECK_INFO_FAST_FWD 0x17
+#define CEC_OP_DECK_INFO_FAST_REV 0x18
+#define CEC_OP_DECK_INFO_NO_MEDIA 0x19
+#define CEC_OP_DECK_INFO_STOP 0x1a
+#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b
+#define CEC_OP_DECK_INFO_SKIP_REV 0x1c
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e
+#define CEC_OP_DECK_INFO_OTHER 0x1f
+
+#define CEC_MSG_GIVE_DECK_STATUS 0x1a
+/* Status Request Operand (status_req) */
+#define CEC_OP_STATUS_REQ_ON 1
+#define CEC_OP_STATUS_REQ_OFF 2
+#define CEC_OP_STATUS_REQ_ONCE 3
+
+#define CEC_MSG_PLAY 0x41
+/* Play Mode Operand (play_mode) */
+#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24
+#define CEC_OP_PLAY_MODE_PLAY_REV 0x20
+#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b
+
+
+/* Tuner Control Feature */
+#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08
+#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92
+#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
+#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
+/* Recording Flag Operand (rec_flag) */
+#define CEC_OP_REC_FLAG_USED 0
+#define CEC_OP_REC_FLAG_NOT_USED 1
+/* Tuner Display Info Operand (tuner_display_info) */
+#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
+#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
+#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2
+
+#define CEC_MSG_TUNER_STEP_DECREMENT 0x06
+#define CEC_MSG_TUNER_STEP_INCREMENT 0x05
+
+
+/* Vendor Specific Commands Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_CEC_VERSION
+ * CEC_MSG_GET_CEC_VERSION
+ */
+#define CEC_MSG_DEVICE_VENDOR_ID 0x87
+#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c
+#define CEC_MSG_VENDOR_COMMAND 0x89
+#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b
+
+
+/* OSD Display Feature */
+#define CEC_MSG_SET_OSD_STRING 0x64
+/* Display Control Operand (disp_ctl) */
+#define CEC_OP_DISP_CTL_DEFAULT 0x00
+#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40
+#define CEC_OP_DISP_CTL_CLEAR 0x80
+
+
+/* Device OSD Transfer Feature */
+#define CEC_MSG_GIVE_OSD_NAME 0x46
+#define CEC_MSG_SET_OSD_NAME 0x47
+
+
+/* Device Menu Control Feature */
+#define CEC_MSG_MENU_REQUEST 0x8d
+/* Menu Request Type Operand (menu_req) */
+#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00
+#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01
+#define CEC_OP_MENU_REQUEST_QUERY 0x02
+
+#define CEC_MSG_MENU_STATUS 0x8e
+/* Menu State Operand (menu_state) */
+#define CEC_OP_MENU_STATE_ACTIVATED 0x00
+#define CEC_OP_MENU_STATE_DEACTIVATED 0x01
+
+#define CEC_MSG_USER_CONTROL_PRESSED 0x44
+/* UI Broadcast Type Operand (ui_bcast_type) */
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91
+#define CEC_OP_UI_BCAST_TYPE_IP 0xa0
+/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */
+#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10
+#define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20
+#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80
+#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90
+#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0
+#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1
+#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2
+#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3
+
+#define CEC_MSG_USER_CONTROL_RELEASED 0x45
+
+
+/* Remote Control Passthrough Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_USER_CONTROL_PRESSED
+ * CEC_MSG_USER_CONTROL_RELEASED
+ */
+
+
+/* Power Status Feature */
+#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f
+#define CEC_MSG_REPORT_POWER_STATUS 0x90
+/* Power Status Operand (pwr_state) */
+#define CEC_OP_POWER_STATUS_ON 0
+#define CEC_OP_POWER_STATUS_STANDBY 1
+#define CEC_OP_POWER_STATUS_TO_ON 2
+#define CEC_OP_POWER_STATUS_TO_STANDBY 3
+
+
+/* General Protocol Messages */
+#define CEC_MSG_FEATURE_ABORT 0x00
+/* Abort Reason Operand (reason) */
+#define CEC_OP_ABORT_UNRECOGNIZED_OP 0
+#define CEC_OP_ABORT_INCORRECT_MODE 1
+#define CEC_OP_ABORT_NO_SOURCE 2
+#define CEC_OP_ABORT_INVALID_OP 3
+#define CEC_OP_ABORT_REFUSED 4
+#define CEC_OP_ABORT_UNDETERMINED 5
+
+#define CEC_MSG_ABORT 0xff
+
+
+/* System Audio Control Feature */
+
+/*
+ * Has also:
+ * CEC_MSG_USER_CONTROL_PRESSED
+ * CEC_MSG_USER_CONTROL_RELEASED
+ */
+#define CEC_MSG_GIVE_AUDIO_STATUS 0x71
+#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d
+#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a
+/* Audio Mute Status Operand (aud_mute_status) */
+#define CEC_OP_AUD_MUTE_STATUS_OFF 0
+#define CEC_OP_AUD_MUTE_STATUS_ON 1
+
+#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3
+#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4
+#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72
+/* System Audio Status Operand (sys_aud_status) */
+#define CEC_OP_SYS_AUD_STATUS_OFF 0
+#define CEC_OP_SYS_AUD_STATUS_ON 1
+
+#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70
+#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e
+/* Audio Format ID Operand (audio_format_id) */
+#define CEC_OP_AUD_FMT_ID_CEA861 0
+#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1
+
+
+/* Audio Rate Control Feature */
+#define CEC_MSG_SET_AUDIO_RATE 0x9a
+/* Audio Rate Operand (audio_rate) */
+#define CEC_OP_AUD_RATE_OFF 0
+#define CEC_OP_AUD_RATE_WIDE_STD 1
+#define CEC_OP_AUD_RATE_WIDE_FAST 2
+#define CEC_OP_AUD_RATE_WIDE_SLOW 3
+#define CEC_OP_AUD_RATE_NARROW_STD 4
+#define CEC_OP_AUD_RATE_NARROW_FAST 5
+#define CEC_OP_AUD_RATE_NARROW_SLOW 6
+
+
+/* Audio Return Channel Control Feature */
+#define CEC_MSG_INITIATE_ARC 0xc0
+#define CEC_MSG_REPORT_ARC_INITIATED 0xc1
+#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2
+#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3
+#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4
+#define CEC_MSG_TERMINATE_ARC 0xc5
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7
+#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8
+/* Low Latency Mode Operand (low_latency_mode) */
+#define CEC_OP_LOW_LATENCY_MODE_OFF 0
+#define CEC_OP_LOW_LATENCY_MODE_ON 1
+/* Audio Output Compensated Operand (audio_out_compensated) */
+#define CEC_OP_AUD_OUT_COMPENSATED_NA 0
+#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1
+#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2
+#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3
+
+
+/* Capability Discovery and Control Feature */
+#define CEC_MSG_CDC_MESSAGE 0xf8
+/* Ethernet-over-HDMI: nobody ever does this... */
+#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00
+#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01
+/* HEC Functionality State Operand (hec_func_state) */
+#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2
+#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3
+/* Host Functionality State Operand (host_func_state) */
+#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0
+#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1
+#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2
+/* ENC Functionality State Operand (enc_func_state) */
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2
+/* CDC Error Code Operand (cdc_errcode) */
+#define CEC_OP_CDC_ERROR_CODE_NONE 0
+#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1
+#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2
+#define CEC_OP_CDC_ERROR_CODE_OTHER 3
+/* HEC Support Operand (hec_support) */
+#define CEC_OP_HEC_SUPPORT_NO 0
+#define CEC_OP_HEC_SUPPORT_YES 1
+/* HEC Activation Operand (hec_activation) */
+#define CEC_OP_HEC_ACTIVATION_ON 0
+#define CEC_OP_HEC_ACTIVATION_OFF 1
+
+#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02
+#define CEC_MSG_CDC_HEC_SET_STATE 0x03
+/* HEC Set State Operand (hec_set_state) */
+#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0
+#define CEC_OP_HEC_SET_STATE_ACTIVATE 1
+
+#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04
+#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05
+#define CEC_MSG_CDC_HEC_DISCOVER 0x06
+/* Hotplug Detect messages */
+#define CEC_MSG_CDC_HPD_SET_STATE 0x10
+/* HPD State Operand (hpd_state) */
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0
+#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2
+#define CEC_OP_HPD_STATE_EDID_DISABLE 3
+#define CEC_OP_HPD_STATE_EDID_ENABLE 4
+#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5
+#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11
+/* HPD Error Code Operand (hpd_error) */
+#define CEC_OP_HPD_ERROR_NONE 0
+#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1
+#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2
+#define CEC_OP_HPD_ERROR_OTHER 3
+#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4
+
+#endif
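
For orientation, these opcode/operand defines map directly onto CEC frame bytes: the first byte carries the initiator and destination logical addresses, the second the opcode, and any operands follow. A minimal sketch (the msg/len locals are illustrative, not part of this header):

	/* <Report Power Status> from playback device 4 to the TV (address 0) */
	__u8 msg[16];
	unsigned int len = 0;

	msg[len++] = (4 << 4) | 0;			/* initiator | destination */
	msg[len++] = CEC_MSG_REPORT_POWER_STATUS;	/* opcode 0x90 */
	msg[len++] = CEC_OP_POWER_STATUS_ON;		/* pwr_state operand */
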
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index dfce61600..7868d602c 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -34,9 +34,9 @@
#define CEPH_MAX_MON 31
/*
- * ceph_file_layout - describe data layout for a file/inode
+ * legacy ceph_file_layout
*/
-struct ceph_file_layout {
+struct ceph_file_layout_legacy {
/* file -> object mapping */
__le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
of page size. */
@@ -53,33 +53,27 @@ struct ceph_file_layout {
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
-#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
-#define ceph_file_layout_stripe_count(l) \
- ((__s32)le32_to_cpu((l).fl_stripe_count))
-#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
-#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
-#define ceph_file_layout_object_su(l) \
- ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_pool(l) \
- ((__s32)le32_to_cpu((l).fl_pg_pool))
-
-static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
-{
- return le32_to_cpu(l->fl_stripe_unit) *
- le32_to_cpu(l->fl_stripe_count);
-}
-
-/* "period" == bytes before i start on a new set of objects */
-static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
-{
- return le32_to_cpu(l->fl_object_size) *
- le32_to_cpu(l->fl_stripe_count);
-}
+struct ceph_string;
+/*
+ * ceph_file_layout - describe data layout for a file/inode
+ */
+struct ceph_file_layout {
+ /* file -> object mapping */
+ u32 stripe_unit; /* stripe unit, in bytes */
+ u32 stripe_count; /* over this many objects */
+ u32 object_size; /* until objects are this big */
+ s64 pool_id; /* rados pool id */
+ struct ceph_string __rcu *pool_ns; /* rados pool namespace */
+};
+
+extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
+extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy);
+extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy);
#define CEPH_MIN_STRIPE_UNIT 65536
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
-
struct ceph_dir_layout {
__u8 dl_dir_hash; /* see ceph_hash.h for ids */
__u8 dl_unused1;
@@ -127,6 +121,7 @@ struct ceph_dir_layout {
/* client <-> mds */
#define CEPH_MSG_MDS_MAP 21
+#define CEPH_MSG_FS_MAP_USER 103
#define CEPH_MSG_CLIENT_SESSION 22
#define CEPH_MSG_CLIENT_RECONNECT 23
@@ -399,7 +394,7 @@ union ceph_mds_request_args {
__le32 flags;
} __attribute__ ((packed)) setxattr;
struct {
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
} __attribute__ ((packed)) setlayout;
struct {
__u8 rule; /* currently fcntl or flock */
@@ -478,7 +473,7 @@ struct ceph_mds_reply_inode {
__le64 version; /* inode version */
__le64 xattr_version; /* version for xattr blob */
struct ceph_mds_reply_cap cap; /* caps issued for this inode */
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
struct ceph_timespec ctime, mtime, atime;
__le32 time_warp_seq;
__le64 size, max_size, truncate_size;
@@ -531,7 +526,7 @@ struct ceph_filelock {
#define CEPH_FILE_MODE_WR 2
#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
-#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */
+#define CEPH_FILE_MODE_BITS 4
int ceph_flags_to_mode(int flags);
@@ -673,7 +668,7 @@ struct ceph_mds_caps {
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
__le32 time_warp_seq;
} __attribute__ ((packed));
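
A hedged sketch of the intended conversion flow for the helpers declared above; the reply pointer is hypothetical, standing in for any structure that still carries the packed on-wire layout:

	struct ceph_file_layout fl;

	/* decode the packed on-wire layout into the richer in-memory form */
	ceph_file_layout_from_legacy(&fl, &reply->layout);
	if (!ceph_file_layout_is_valid(&fl))
		return -EINVAL;
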
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 19e9932f3..f990f2cc9 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/bug.h>
+#include <linux/slab.h>
#include <linux/time.h>
#include <asm/unaligned.h>
@@ -217,6 +218,60 @@ static inline void ceph_encode_string(void **p, void *end,
*p += len;
}
+/*
+ * version and length starting block encoders/decoders
+ */
+
+/* current code version (u8) + compat code version (u8) + len of struct (u32) */
+#define CEPH_ENCODING_START_BLK_LEN 6
+
+/**
+ * ceph_start_encoding - start encoding block
+ * @struct_v: current (code) version of the encoding
+ * @struct_compat: oldest code version that can decode it
+ * @struct_len: length of struct encoding
+ */
+static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat,
+ u32 struct_len)
+{
+ ceph_encode_8(p, struct_v);
+ ceph_encode_8(p, struct_compat);
+ ceph_encode_32(p, struct_len);
+}
+
+/**
+ * ceph_start_decoding - start decoding block
+ * @v: current version of the encoding that the code supports
+ * @name: name of the struct (free-form)
+ * @struct_v: out param for the encoding version
+ * @struct_len: out param for the length of struct encoding
+ *
+ * Validates the length of struct encoding, so unsafe ceph_decode_*
+ * variants can be used for decoding.
+ */
+static inline int ceph_start_decoding(void **p, void *end, u8 v,
+ const char *name, u8 *struct_v,
+ u32 *struct_len)
+{
+ u8 struct_compat;
+
+ ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad);
+ *struct_v = ceph_decode_8(p);
+ struct_compat = ceph_decode_8(p);
+ if (v < struct_compat) {
+ pr_warn("got struct_v %d struct_compat %d > %d of %s\n",
+ *struct_v, struct_compat, v, name);
+ return -EINVAL;
+ }
+
+ *struct_len = ceph_decode_32(p);
+ ceph_decode_need(p, end, *struct_len, bad);
+ return 0;
+
+bad:
+ return -ERANGE;
+}
+
#define ceph_encode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
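
A small sketch of how the new encode/decode helpers are meant to pair up, assuming p/end bound a message buffer and "foo" is a hypothetical versioned struct:

	/* encoder: version 1, decodable by version 1, payload_len bytes follow */
	ceph_start_encoding(&p, 1, 1, payload_len);

	/* decoder: rejects encodings newer than this code can understand */
	ret = ceph_start_decoding(&p, end, 1, "foo", &struct_v, &struct_len);
	if (ret)
		return ret;
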
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 6b79a6ba3..83fc1fff7 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -21,6 +21,7 @@
#include <linux/ceph/mon_client.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/ceph_fs.h>
+#include <linux/ceph/string_table.h>
/*
* mount options
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index e2a92df08..24d704d1e 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -95,7 +95,7 @@ struct ceph_mon_client {
struct ceph_mon_subscribe_item item;
bool want;
u32 have; /* epoch */
- } subs[3];
+ } subs[4];
int fs_cluster_id; /* "mdsmap.<id>" sub */
#ifdef CONFIG_DEBUG_FS
@@ -111,9 +111,10 @@ extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
enum {
- CEPH_SUB_MDSMAP = 0,
- CEPH_SUB_MONMAP,
+ CEPH_SUB_MONMAP = 0,
CEPH_SUB_OSDMAP,
+ CEPH_SUB_FSMAP,
+ CEPH_SUB_MDSMAP,
};
extern const char *ceph_sub_str[];
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index 4b0d38960..ddd0d48d0 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -2,7 +2,6 @@
#define _FS_CEPH_MSGPOOL
#include <linux/mempool.h>
-#include <linux/ceph/messenger.h>
/*
* we use memory pools for preallocating messages we may receive, to
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 1b3b6e155..858932304 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -9,6 +9,7 @@
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/messenger.h>
+#include <linux/ceph/msgpool.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 21d7f0489..9a9041784 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -63,11 +63,13 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
struct ceph_object_locator {
s64 pool;
+ struct ceph_string *pool_ns;
};
static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
{
oloc->pool = -1;
+ oloc->pool_ns = NULL;
}
static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
@@ -75,11 +77,9 @@ static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
return oloc->pool == -1;
}
-static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
- const struct ceph_object_locator *src)
-{
- dest->pool = src->pool;
-}
+void ceph_oloc_copy(struct ceph_object_locator *dest,
+ const struct ceph_object_locator *src);
+void ceph_oloc_destroy(struct ceph_object_locator *oloc);
/*
* Maximum supported by kernel client object name length
diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h
new file mode 100644
index 000000000..1b02c96da
--- /dev/null
+++ b/include/linux/ceph/string_table.h
@@ -0,0 +1,62 @@
+#ifndef _FS_CEPH_STRING_TABLE_H
+#define _FS_CEPH_STRING_TABLE_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+struct ceph_string {
+ struct kref kref;
+ union {
+ struct rb_node node;
+ struct rcu_head rcu;
+ };
+ size_t len;
+ char str[];
+};
+
+extern void ceph_release_string(struct kref *ref);
+extern struct ceph_string *ceph_find_or_create_string(const char *str,
+ size_t len);
+extern bool ceph_strings_empty(void);
+
+static inline struct ceph_string *ceph_get_string(struct ceph_string *str)
+{
+ kref_get(&str->kref);
+ return str;
+}
+
+static inline void ceph_put_string(struct ceph_string *str)
+{
+ if (!str)
+ return;
+ kref_put(&str->kref, ceph_release_string);
+}
+
+static inline int ceph_compare_string(struct ceph_string *cs,
+ const char* str, size_t len)
+{
+ size_t cs_len = cs ? cs->len : 0;
+ if (cs_len != len)
+ return cs_len - len;
+ if (len == 0)
+ return 0;
+ return strncmp(cs->str, str, len);
+}
+
+#define ceph_try_get_string(x) \
+({ \
+ struct ceph_string *___str; \
+ rcu_read_lock(); \
+ for (;;) { \
+ ___str = rcu_dereference(x); \
+ if (!___str || \
+ kref_get_unless_zero(&___str->kref)) \
+ break; \
+ } \
+ rcu_read_unlock(); \
+ (___str); \
+})
+
+#endif
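
As a usage sketch, a rados pool namespace would typically be interned once and then shared by reference; error handling is elided and the namespace literal is hypothetical:

	struct ceph_string *ns;

	ns = ceph_find_or_create_string("ns1", strlen("ns1"));
	/* ... publish with rcu_assign_pointer(), read with ceph_try_get_string() ... */
	ceph_put_string(ns);		/* drop the reference when done */
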
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a20320c66..984f73b71 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -87,6 +87,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
struct cgroup *cgroup_get_from_path(const char *path);
+struct cgroup *cgroup_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index fb39d5add..a39c0c530 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -33,6 +33,8 @@
#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
+/* parents need enable during gate/ungate, set rate and re-parent */
+#define CLK_OPS_PARENT_ENABLE BIT(12)
struct clk;
struct clk_hw;
@@ -293,6 +295,7 @@ void clk_unregister_fixed_rate(struct clk *clk);
struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy);
+void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
void of_fixed_clk_setup(struct device_node *np);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0df4a51e1..123c02788 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -20,8 +20,6 @@ struct device;
struct clk;
-#ifdef CONFIG_COMMON_CLK
-
/**
* DOC: clk notifier callback types
*
@@ -78,6 +76,8 @@ struct clk_notifier_data {
unsigned long new_rate;
};
+#ifdef CONFIG_COMMON_CLK
+
/**
* clk_notifier_register: register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
@@ -140,6 +140,18 @@ bool clk_is_match(const struct clk *p, const struct clk *q);
#else
+static inline int clk_notifier_register(struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_notifier_unregister(struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
static inline long clk_get_accuracy(struct clk *clk)
{
return -ENOTSUPP;
@@ -461,6 +473,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
return NULL;
}
+static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ return NULL;
+}
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 44a1aff22..08398182f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
extern int clocksource_i8253_init(void);
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- OF_DECLARE_1(clksrc, name, compat, fn)
+ OF_DECLARE_1_RET(clksrc, name, compat, fn)
#ifdef CONFIG_CLKSRC_PROBE
extern void clocksource_probe(void);
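
With OF_DECLARE_1_RET the init callback returns int instead of void, so failures can propagate; a hedged sketch of the new declaration shape (driver name and compatible string are made up):

	static int __init my_timer_init(struct device_node *np)
	{
		/* map registers, register the clocksource, ... */
		return 0;	/* non-zero now reports probe failure */
	}
	CLOCKSOURCE_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);
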
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a58c852a2..d4e106b5d 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,18 @@
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
+/*
+ * Determines how hard direct compaction should try to succeed.
+ * Lower value means higher priority, analogously to reclaim priority.
+ */
+enum compact_priority {
+ COMPACT_PRIO_SYNC_LIGHT,
+ MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ COMPACT_PRIO_ASYNC,
+ INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
+};
+
/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
@@ -43,14 +55,6 @@ enum compact_result {
COMPACT_PARTIAL,
};
-/* Used to signal whether compaction detected need_sched() or lock contention */
-/* No contention detected */
-#define COMPACT_CONTENDED_NONE 0
-/* Either need_sched() was true or fatal signal pending */
-#define COMPACT_CONTENDED_SCHED 1
-/* Zone lock or lru_lock was contended in async compaction */
-#define COMPACT_CONTENDED_LOCK 2
-
struct alloc_context; /* in mm/internal.h */
#ifdef CONFIG_COMPACTION
@@ -64,9 +68,8 @@ extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
- unsigned int order,
- unsigned int alloc_flags, const struct alloc_context *ac,
- enum migrate_mode mode, int *contended);
+ unsigned int order, unsigned int alloc_flags,
+ const struct alloc_context *ac, enum compact_priority prio);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
@@ -151,14 +154,6 @@ extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
#else
-static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
- unsigned int order, int alloc_flags,
- const struct alloc_context *ac,
- enum migrate_mode mode, int *contended)
-{
- return COMPACT_CONTINUE;
-}
-
static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}
@@ -212,6 +207,7 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+struct node;
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e2949397c..573c5a189 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -158,7 +158,7 @@
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
@@ -242,7 +242,11 @@
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
@@ -250,7 +254,7 @@
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 793c0829e..668569844 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,7 +17,6 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
-# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
@@ -45,7 +44,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
-# define __pmem
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
@@ -304,23 +302,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__u.__val; \
})
-/**
- * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
- * @cond: boolean expression to wait for
- *
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
- *
- * The control dependency provides a LOAD->STORE order, the additional RMB
- * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
- * aka. ACQUIRE.
- */
-#define smp_cond_acquire(cond) do { \
- while (!(cond)) \
- cpu_relax(); \
- smp_rmb(); /* ctrl + rmb := acquire */ \
-} while (0)
-
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -545,10 +526,15 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
+ *
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
+ typeof(*(p)) *___typecheck_p __maybe_unused; \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
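
The added ___typecheck_p line makes misuse a compile error; a minimal illustration with a hypothetical RCU-free global pointer:

	struct foo *f = lockless_dereference(foo_ptr);	/* ok: foo_ptr is a pointer */
	/* lockless_dereference(some_integer) now fails to compile */
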
diff --git a/include/linux/console.h b/include/linux/console.h
index 98c8615dc..d530c4627 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -28,6 +28,13 @@ struct tty_struct;
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"
+/**
+ * struct consw - callbacks for consoles
+ *
+ * @con_set_palette: sets the palette of the console to @table (optional)
+ * @con_scrolldelta: the contents of the console should be scrolled by @lines.
+ * Invoked by user. (optional)
+ */
struct consw {
struct module *owner;
const char *(*con_startup)(void);
@@ -38,7 +45,6 @@ struct consw {
void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
void (*con_cursor)(struct vc_data *, int);
int (*con_scroll)(struct vc_data *, int, int, int, int);
- void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -47,8 +53,9 @@ struct consw {
int (*con_font_copy)(struct vc_data *, int);
int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
unsigned int);
- int (*con_set_palette)(struct vc_data *, const unsigned char *);
- int (*con_scrolldelta)(struct vc_data *, int);
+ void (*con_set_palette)(struct vc_data *,
+ const unsigned char *table);
+ void (*con_scrolldelta)(struct vc_data *, int lines);
int (*con_set_origin)(struct vc_data *);
void (*con_save_screen)(struct vc_data *);
u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index e329ee266..6fd3c908a 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -21,6 +21,38 @@ struct uni_pagedir;
#define NPAR 16
+/*
+ * Example: vc_data of a console that was scrolled 3 lines down.
+ *
+ * Console buffer
+ * vc_screenbuf ---------> +----------------------+-.
+ * | initializing W | \
+ * | initializing X | |
+ * | initializing Y | > scroll-back area
+ * | initializing Z | |
+ * | | /
+ * vc_visible_origin ---> ^+----------------------+-:
+ * (changes by scroll) || Welcome to linux | \
+ * || | |
+ * vc_rows --->< | login: root | | visible on console
+ * || password: | > (vc_screenbuf_size is
+ * vc_origin -----------> || | | vc_size_row * vc_rows)
+ * (start when no scroll) || Last login: 12:28 | /
+ * v+----------------------+-:
+ * | Have a lot of fun... | \
+ * vc_pos -----------------|--------v | > scroll-front area
+ * | ~ # cat_ | /
+ * vc_scr_end -----------> +----------------------+-:
+ * (vc_origin + | | \ EMPTY, to be filled by
+ * vc_screenbuf_size) | | / vc_video_erase_char
+ * +----------------------+-'
+ * <---- 2 * vc_cols ----->
+ * <---- vc_size_row ----->
+ *
+ * Note that every character in the console buffer is accompanied with an
+ * attribute in the buffer right after the character. This is not depicted
+ * in the figure.
+ */
struct vc_data {
struct tty_port port; /* Upper level data */
@@ -74,7 +106,6 @@ struct vc_data {
unsigned int vc_decawm : 1; /* Autowrap Mode */
unsigned int vc_deccm : 1; /* Cursor Visible */
unsigned int vc_decim : 1; /* Insert Mode */
- unsigned int vc_deccolm : 1; /* 80/132 Column Mode */
/* attribute flags */
unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
unsigned int vc_italic:1;
@@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work);
#define CUR_DEFAULT CUR_UNDERLINE
-#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp)
+static inline bool con_is_visible(const struct vc_data *vc)
+{
+ return *vc->vc_display_fg == vc;
+}
#endif /* _LINUX_CONSOLE_STRUCT_H */
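
Call sites convert mechanically from the old macro; a sketch, where update_screen() stands in for whatever redraw path the caller uses:

	if (con_is_visible(vc))
		update_screen(vc);
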
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d25927423..c78fc2741 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -31,6 +31,19 @@ static inline void user_exit(void)
context_tracking_exit(CONTEXT_USER);
}
+/* Called with interrupts disabled. */
+static inline void user_enter_irqoff(void)
+{
+ if (context_tracking_is_enabled())
+ __context_tracking_enter(CONTEXT_USER);
+}
+
+static inline void user_exit_irqoff(void)
+{
+ if (context_tracking_is_enabled())
+ __context_tracking_exit(CONTEXT_USER);
+}
+
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
@@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void)
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
+static inline void user_enter_irqoff(void) { }
+static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
@@ -84,7 +99,8 @@ static inline void context_tracking_init(void) { }
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-static inline void guest_enter(void)
+/* must be called with irqs disabled */
+static inline void guest_enter_irqoff(void)
{
if (vtime_accounting_cpu_enabled())
vtime_guest_enter(current);
@@ -93,9 +109,19 @@ static inline void guest_enter(void)
if (context_tracking_is_enabled())
__context_tracking_enter(CONTEXT_GUEST);
+
+	/* KVM does not hold any references to RCU-protected data when it
+	 * switches the CPU into guest mode. In fact, switching to guest
+	 * mode is very similar to exiting to userspace from the RCU point
+	 * of view. In addition, the CPU may stay in guest mode for quite
+	 * a long time (up to one time slice). Let's treat guest mode as
+	 * a quiescent state, just like we do with user-mode execution.
+ */
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
}
-static inline void guest_exit(void)
+static inline void guest_exit_irqoff(void)
{
if (context_tracking_is_enabled())
__context_tracking_exit(CONTEXT_GUEST);
@@ -107,7 +133,7 @@ static inline void guest_exit(void)
}
#else
-static inline void guest_enter(void)
+static inline void guest_enter_irqoff(void)
{
/*
 	 * This is running in ioctl context so it's safe
@@ -116,9 +142,10 @@ static inline void guest_enter(void)
*/
vtime_account_system(current);
current->flags |= PF_VCPU;
+ rcu_virt_note_context_switch(smp_processor_id());
}
-static inline void guest_exit(void)
+static inline void guest_exit_irqoff(void)
{
/* Flush the guest cputime we spent on the guest */
vtime_account_system(current);
@@ -126,4 +153,22 @@ static inline void guest_exit(void)
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_enter_irqoff();
+ local_irq_restore(flags);
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
#endif
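
A sketch of the intended call pattern in a virtualization run loop; the real users are the KVM arch entry paths, this caller is hypothetical:

	local_irq_disable();
	guest_enter_irqoff();	/* vtime accounting + RCU quiescent note */
	/* ... run the guest ... */
	guest_exit_irqoff();
	local_irq_enable();
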
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 21597dcac..797d9c8e9 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -55,17 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
-/*
- * CPU notifier priorities.
- */
-enum {
- CPU_PRI_PERF = 20,
-
- /* bring up workqueues before normal notifiers and down after */
- CPU_PRI_WORKQUEUE_UP = 5,
- CPU_PRI_WORKQUEUE_DOWN = -5,
-};
-
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4e81e08db..631ba33bb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -36,6 +36,12 @@
struct cpufreq_governor;
+enum cpufreq_table_sorting {
+ CPUFREQ_TABLE_UNSORTED,
+ CPUFREQ_TABLE_SORTED_ASCENDING,
+ CPUFREQ_TABLE_SORTED_DESCENDING
+};
+
struct cpufreq_freqs {
unsigned int cpu; /* cpu nr */
unsigned int old;
@@ -87,6 +93,7 @@ struct cpufreq_policy {
struct cpufreq_user_policy user_policy;
struct cpufreq_frequency_table *freq_table;
+ enum cpufreq_table_sorting freq_table_sorted;
struct list_head policy_list;
struct kobject kobj;
@@ -113,6 +120,10 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ unsigned int cached_target_freq;
+ int cached_resolved_idx;
+
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
@@ -185,6 +196,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
static inline void disable_cpufreq(void) { }
#endif
+#ifdef CONFIG_CPU_FREQ_STAT
+void cpufreq_stats_create_table(struct cpufreq_policy *policy);
+void cpufreq_stats_free_table(struct cpufreq_policy *policy);
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq);
+#else
+static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
+static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
+static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq) { }
+#endif /* CONFIG_CPU_FREQ_STAT */
+
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
*********************************************************************/
@@ -251,6 +274,16 @@ struct cpufreq_driver {
unsigned int index);
unsigned int (*fast_switch)(struct cpufreq_policy *policy,
unsigned int target_freq);
+
+ /*
+ * Caches and returns the lowest driver-supported frequency greater than
+ * or equal to the target frequency, subject to any driver limitations.
+ * Does not set the frequency. Only to be implemented for drivers with
+ * target().
+ */
+ unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
+ unsigned int target_freq);
+
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
@@ -455,18 +488,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define MIN_LATENCY_MULTIPLIER (20)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-/* Governor Events */
-#define CPUFREQ_GOV_START 1
-#define CPUFREQ_GOV_STOP 2
-#define CPUFREQ_GOV_LIMITS 3
-#define CPUFREQ_GOV_POLICY_INIT 4
-#define CPUFREQ_GOV_POLICY_EXIT 5
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
- int initialized;
- int (*governor) (struct cpufreq_policy *policy,
- unsigned int event);
+ int (*init)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
+ int (*start)(struct cpufreq_policy *policy);
+ void (*stop)(struct cpufreq_policy *policy);
+ void (*limits)(struct cpufreq_policy *policy);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
@@ -487,12 +515,22 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
+{
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
/* Governor attribute set */
struct gov_attr_set {
struct kobject kobj;
@@ -582,11 +620,9 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
-int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table,
- unsigned int target_freq,
- unsigned int relation,
- unsigned int *index);
+int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
@@ -597,6 +633,227 @@ int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
+
+/* Find lowest freq at or above target in a table in ascending order */
+static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq >= target_freq)
+ return i;
+
+ best = i;
+ }
+
+ return best;
+}
+
+/* Find lowest freq at or above target in a table in descending order */
+static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq == target_freq)
+ return i;
+
+ if (freq > target_freq) {
+ best = i;
+ continue;
+ }
+
+ /* No freq found above target_freq */
+ if (best == -1)
+ return i;
+
+ return best;
+ }
+
+ return best;
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_al(policy, target_freq);
+ else
+ return cpufreq_table_find_index_dl(policy, target_freq);
+}
+
+/* Find highest freq at or below target in a table in ascending order */
+static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq == target_freq)
+ return i;
+
+ if (freq < target_freq) {
+ best = i;
+ continue;
+ }
+
+ /* No freq found below target_freq */
+ if (best == -1)
+ return i;
+
+ return best;
+ }
+
+ return best;
+}
+
+/* Find highest freq at or below target in a table in descending order */
+static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq <= target_freq)
+ return i;
+
+ best = i;
+ }
+
+ return best;
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_ah(policy, target_freq);
+ else
+ return cpufreq_table_find_index_dh(policy, target_freq);
+}
+
+/* Find closest freq to target in a table in ascending order */
+static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq == target_freq)
+ return i;
+
+ if (freq < target_freq) {
+ best = i;
+ continue;
+ }
+
+ /* No freq found below target_freq */
+ if (best == -1)
+ return i;
+
+ /* Choose the closest freq */
+ if (target_freq - table[best].frequency > freq - target_freq)
+ return i;
+
+ return best;
+ }
+
+ return best;
+}
+
+/* Find closest freq to target in a table in descending order */
+static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq;
+ int i, best = -1;
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ freq = table[i].frequency;
+
+ if (freq == target_freq)
+ return i;
+
+ if (freq > target_freq) {
+ best = i;
+ continue;
+ }
+
+ /* No freq found above target_freq */
+ if (best == -1)
+ return i;
+
+ /* Choose the closest freq */
+ if (table[best].frequency - target_freq > target_freq - freq)
+ return i;
+
+ return best;
+ }
+
+ return best;
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+ return cpufreq_table_find_index_ac(policy, target_freq);
+ else
+ return cpufreq_table_find_index_dc(policy, target_freq);
+}
+
+static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
+ return cpufreq_table_index_unsorted(policy, target_freq,
+ relation);
+
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+ return cpufreq_table_find_index_l(policy, target_freq);
+ case CPUFREQ_RELATION_H:
+ return cpufreq_table_find_index_h(policy, target_freq);
+ case CPUFREQ_RELATION_C:
+ return cpufreq_table_find_index_c(policy, target_freq);
+ default:
+ pr_err("%s: Invalid relation: %d\n", __func__, relation);
+ return -EINVAL;
+ }
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -617,8 +874,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
#endif
-/* the following funtion is for cpufreq core use only */
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
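
A brief sketch of the new lookup path on a sorted table: the relation selects the rounding direction (L = lowest frequency at or above target, H = highest at or below, C = closest either way). The idx/target_freq locals are illustrative:

	int idx;

	idx = cpufreq_frequency_table_target(policy, target_freq,
					     CPUFREQ_RELATION_L);
	if (idx >= 0)
		target_freq = policy->freq_table[idx].frequency;
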
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 386374d19..34bd80512 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,22 +1,100 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
+#include <linux/types.h>
+
enum cpuhp_state {
CPUHP_OFFLINE,
CPUHP_CREATE_THREADS,
+ CPUHP_PERF_PREPARE,
+ CPUHP_PERF_X86_PREPARE,
+ CPUHP_PERF_X86_UNCORE_PREP,
+ CPUHP_PERF_X86_AMD_UNCORE_PREP,
+ CPUHP_PERF_X86_RAPL_PREP,
+ CPUHP_PERF_BFIN,
+ CPUHP_PERF_POWER,
+ CPUHP_PERF_SUPERH,
+ CPUHP_X86_HPET_DEAD,
+ CPUHP_X86_APB_DEAD,
+ CPUHP_WORKQUEUE_PREP,
+ CPUHP_POWER_NUMA_PREPARE,
+ CPUHP_HRTIMERS_PREPARE,
+ CPUHP_PROFILE_PREPARE,
+ CPUHP_X2APIC_PREPARE,
+ CPUHP_SMPCFD_PREPARE,
+ CPUHP_RCUTREE_PREP,
CPUHP_NOTIFY_PREPARE,
+ CPUHP_TIMERS_DEAD,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
+ CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_IRQ_GIC_STARTING,
+ CPUHP_AP_IRQ_GICV3_STARTING,
+ CPUHP_AP_IRQ_HIP04_STARTING,
+ CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+ CPUHP_AP_IRQ_BCM2836_STARTING,
+ CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_PERF_X86_UNCORE_STARTING,
+ CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+ CPUHP_AP_PERF_X86_STARTING,
+ CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+ CPUHP_AP_PERF_X86_CQM_STARTING,
+ CPUHP_AP_PERF_X86_CSTATE_STARTING,
+ CPUHP_AP_PERF_XTENSA_STARTING,
+ CPUHP_AP_PERF_METAG_STARTING,
+ CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ CPUHP_AP_ARM_VFP_STARTING,
+ CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+ CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+ CPUHP_AP_ARM_TWD_STARTING,
+ CPUHP_AP_METAG_TIMER_STARTING,
+ CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_ARMADA_TIMER_STARTING,
+ CPUHP_AP_MARCO_TIMER_STARTING,
+ CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+ CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_KVM_STARTING,
+ CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+ CPUHP_AP_KVM_ARM_VGIC_STARTING,
+ CPUHP_AP_KVM_ARM_TIMER_STARTING,
+ CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT4_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
+ CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_NOTIFY_STARTING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,
CPUHP_AP_SMPBOOT_THREADS,
+ CPUHP_AP_X86_VDSO_VMA_ONLINE,
+ CPUHP_AP_PERF_ONLINE,
+ CPUHP_AP_PERF_X86_ONLINE,
+ CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+ CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+ CPUHP_AP_PERF_X86_RAPL_ONLINE,
+ CPUHP_AP_PERF_X86_CQM_ONLINE,
+ CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+ CPUHP_AP_PERF_S390_CF_ONLINE,
+ CPUHP_AP_PERF_S390_SF_ONLINE,
+ CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_X86_HPET_ONLINE,
+ CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
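
For orientation, subsystems occupy one of these slots through the cpuhp_setup_state() family of helpers; a hedged sketch with hypothetical callbacks:

	static int myperf_prepare(unsigned int cpu)
	{
		/* allocate per-cpu state before the CPU comes up */
		return 0;
	}

	ret = cpuhp_setup_state(CPUHP_PERF_PREPARE, "myperf:prepare",
				myperf_prepare, NULL);
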
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 07b83d32f..bb31373c3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -252,4 +252,22 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
#define CPUIDLE_DRIVER_STATE_START 0
#endif
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+({ \
+ int __ret; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
+})
+
#endif /* _LINUX_CPUIDLE_H */
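
A sketch of how an arch cpuidle driver's ->enter() callback would use the macro; my_lowlevel_suspend() is a hypothetical firmware call:

	static int my_idle_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int idx)
	{
		/* idx 0 short-circuits to cpu_do_idle(); deeper states
		 * are bracketed by cpu_pm_enter()/cpu_pm_exit().
		 */
		return CPU_PM_CPU_IDLE_ENTER(my_lowlevel_suspend, idx);
	}
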
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index e828cf65d..da7fbf1cd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -579,7 +579,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
}
/**
- * cpumask_parse - extract a cpumask from from a string
+ * cpumask_parse - extract a cpumask from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6e28c895c..7cee55516 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -47,16 +47,18 @@
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
-#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
-#define CRYPTO_ALG_TYPE_HASH 0x00000008
-#define CRYPTO_ALG_TYPE_SHASH 0x00000009
-#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
+#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
+#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
+#define CRYPTO_ALG_TYPE_HASH 0x0000000e
+#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
+#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -486,8 +488,6 @@ struct ablkcipher_tfm {
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
struct crypto_ablkcipher *base;
@@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask)
* state information is unused by the kernel crypto API.
*/
-/**
- * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * ablkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an ablkcipher. The returned struct
- * crypto_ablkcipher is the cipher handle that is required for any subsequent
- * API invocation for that ablkcipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask);
-
static inline struct crypto_tfm *crypto_ablkcipher_tfm(
struct crypto_ablkcipher *tfm)
{
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 43d5f0b79..9c6dc7704 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,7 +14,6 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, bool wake_all);
@@ -46,19 +45,15 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t);
-int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags, get_block_t gb)
{
return VM_FAULT_FALLBACK;
}
-#define __dax_pmd_fault dax_pmd_fault
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
-#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f53fa0550..5ff3e9a4f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -130,17 +130,18 @@ struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
- int (*d_compare)(const struct dentry *, const struct dentry *,
+ int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
+ int (*d_init)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_prune)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
- struct inode *(*d_select_inode)(struct dentry *, unsigned);
- struct dentry *(*d_real)(struct dentry *, struct inode *);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *,
+ unsigned int);
} ____cacheline_aligned;
/*
@@ -206,10 +207,8 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
-
-#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
-#define DCACHE_OP_REAL 0x08000000
+#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */
+#define DCACHE_OP_REAL 0x04000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
#define DCACHE_DENTRY_CURSOR 0x20000000
@@ -264,7 +263,7 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, struct qstr *);
+extern void dentry_update_name_case(struct dentry *, const struct qstr *);
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
@@ -557,25 +556,27 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
-static inline struct dentry *d_real(struct dentry *dentry)
+/**
+ * d_real - Return the real dentry
+ * @dentry: the dentry to query
+ * @inode: inode used to select the dentry from among multiple layers (can be NULL)
+ * @flags: open flags to control copy-up behavior
+ *
+ * If dentry is on a union/overlay, then return the underlying, real dentry.
+ * Otherwise return the dentry itself.
+ *
+ * See also: Documentation/filesystems/vfs.txt
+ */
+static inline struct dentry *d_real(struct dentry *dentry,
+ const struct inode *inode,
+ unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, NULL);
+ return dentry->d_op->d_real(dentry, inode, flags);
else
return dentry;
}
-static inline struct inode *vfs_select_inode(struct dentry *dentry,
- unsigned open_flags)
-{
- struct inode *inode = d_inode(dentry);
-
- if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
- inode = dentry->d_op->d_select_inode(dentry, open_flags);
-
- return inode;
-}
-
/**
* d_real_inode - Return the real inode
* @dentry: The dentry to query
@@ -585,7 +586,7 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
*/
static inline struct inode *d_real_inode(struct dentry *dentry)
{
- return d_backing_inode(d_real(dentry));
+ return d_backing_inode(d_real(dentry, NULL, 0));
}
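For reference, a minimal sketch of calling the new three-argument d_real();
the wrapper function here is illustrative and not part of this patch:

	/* Sketch: resolve the real dentry backing an overlay/union file.
	 * Passing a NULL inode lets the filesystem pick the layer; the
	 * open flags let it decide whether a copy-up is needed. */
	static struct dentry *pick_real_dentry(struct dentry *dentry,
					       unsigned int open_flags)
	{
		return d_real(dentry, NULL, open_flags);
	}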
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 46056cb16..d82bf1994 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -38,7 +38,7 @@ struct debug_obj {
* @name: name of the object type
* @debug_hint: function returning address, which has an associated
* kernel symbol, to allow identifying the object
- * @is_static_object return true if the obj is static, otherwise return false
+ * @is_static_object: return true if the obj is static, otherwise return false
* @fixup_init: fixup function, which is called when the init check
* fails. All fixup functions must return true if fixup
* was successful, otherwise return false
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 0830c9e86..91acfce74 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -19,6 +19,15 @@ struct dm_table;
struct mapped_device;
struct bio_vec;
+/*
+ * Type of table, mapped_device's mempool and request_queue
+ */
+#define DM_TYPE_NONE 0
+#define DM_TYPE_BIO_BASED 1
+#define DM_TYPE_REQUEST_BASED 2
+#define DM_TYPE_MQ_REQUEST_BASED 3
+#define DM_TYPE_DAX_BIO_BASED 4
+
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
@@ -116,6 +125,14 @@ typedef void (*dm_io_hints_fn) (struct dm_target *ti,
*/
typedef int (*dm_busy_fn) (struct dm_target *ti);
+/*
+ * Returns:
+ * < 0 : error
+ * >= 0 : the number of bytes accessible at the address
+ */
+typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size);
+
void dm_error(const char *message);
struct dm_dev {
@@ -162,6 +179,7 @@ struct target_type {
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
+ dm_direct_access_fn direct_access;
/* For internal device-mapper use. */
struct list_head list;
@@ -444,6 +462,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
/*
+ * Target can use this to set the table's type.
+ * Can only ever be called from a target's ctr.
+ * Useful for "hybrid" target (supports both bio-based
+ * and request-based).
+ */
+void dm_table_set_type(struct dm_table *t, unsigned type);
+
+/*
* Finally call this to make the table ready for use.
*/
int dm_table_complete(struct dm_table *t);
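A hedged sketch of how a target might use the two additions above: a
hybrid target's ctr forcing the table type, and a direct_access callback
honouring the documented return contract (the target name and bodies are
hypothetical):

	/* Hypothetical constructor for a hybrid target that only wants
	 * to run request-based. */
	static int example_ctr(struct dm_target *ti, unsigned int argc,
			       char **argv)
	{
		dm_table_set_type(ti->table, DM_TYPE_REQUEST_BASED);
		return 0;
	}

	/* Hypothetical direct_access callback: fill *kaddr/*pfn for the
	 * given sector and return the number of bytes accessible there,
	 * or a negative errno. This sketch simply opts out of DAX. */
	static long example_direct_access(struct dm_target *ti, sector_t sector,
					  void **kaddr, pfn_t *pfn, long size)
	{
		return -EOPNOTSUPP;
	}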
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a68cbe59e..b91b023de 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -57,7 +57,8 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_rw; /* READ|WRITE - not READA */
+ int bi_op; /* REQ_OP */
+ int bi_op_flags; /* rq_flag_bits */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
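With bi_rw split in two, dm_io() callers now fill the operation and its
modifier flags separately; a minimal sketch of a synchronous write (the
data, region, client and error_bits variables are assumed to exist):

	struct dm_io_request io_req = {
		.bi_op		= REQ_OP_WRITE,	/* the operation */
		.bi_op_flags	= 0,		/* rq_flag_bits modifiers */
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= data,
		.notify.fn	= NULL,		/* NULL: synchronous */
		.client		= io_client,
	};

	r = dm_io(&io_req, 1, &region, &error_bits);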
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 4551c6f2a..e0b0741ae 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -242,6 +242,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-int dma_buf_debugfs_create_file(const char *name,
- int (*write)(struct seq_file *));
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 8443bbb5c..81c5c8d16 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -39,7 +39,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* the arch code to take care of attributes and cache maintenance
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
@@ -56,9 +56,9 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* directly as DMA mapping callbacks for simplicity
*/
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 71c1b215e..dc69df04a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -5,13 +5,58 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
+/**
+ * List of possible attributes associated with a DMA mapping. The semantics
+ * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ *
+ * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
+ * forces all pending DMA writes to complete.
+ */
+#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+/*
+ * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
+ * may be weakly ordered, that is that reads and writes may pass each other.
+ */
+#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
+/*
+ * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
+ * buffered to improve performance.
+ */
+#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
+/*
+ * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
+ * consistent or non-consistent memory as it sees fit.
+ */
+#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
+/*
+ * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
+ * virtual mapping for the allocated buffer.
+ */
+#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
+/*
+ * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
+ * the CPU cache for the given buffer, assuming that it has already been
+ * transferred to the 'device' domain.
+ */
+#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
+ * in physical memory.
+ */
+#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
+/*
+ * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
+ * that it's probably not worth the time to try to allocate memory in a way
+ * that gives better TLB efficiency.
+ */
+#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
+
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot
@@ -21,34 +66,35 @@
struct dma_map_ops {
void* (*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*free)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+ void *, dma_addr_t, size_t,
+ unsigned long attrs);
int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, struct dma_attrs *attrs);
+ dma_addr_t, size_t, unsigned long attrs);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
@@ -123,7 +169,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
@@ -142,7 +188,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -158,7 +204,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
*/
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
@@ -176,7 +222,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -195,7 +241,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ addr = ops->map_page(dev, page, offset, size, dir, 0);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
@@ -208,7 +254,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, NULL);
+ ops->unmap_page(dev, addr, size, dir, 0);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
@@ -289,10 +335,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
@@ -321,7 +367,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
*/
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
@@ -330,7 +376,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -338,7 +384,8 @@ dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
@@ -348,7 +395,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
@@ -356,7 +403,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
@@ -378,7 +425,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -398,31 +445,27 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -646,9 +689,8 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
@@ -657,9 +699,8 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
@@ -670,15 +711,14 @@ static inline int dma_mmap_wc(struct device *dev,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
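For callers the conversion is mechanical: the struct dma_attrs dance is
replaced by OR-ing attribute bits directly into the call. A before/after
sketch (the buffer and handle names are illustrative):

	/* Before this patch: */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);

	/* After: attributes are a plain unsigned long bitmask. */
	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
			      DMA_ATTR_WRITE_COMBINE |
			      DMA_ATTR_NO_KERNEL_MAPPING);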
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index 79df69dc6..aaff68efb 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -39,14 +39,22 @@ struct hsu_dma_chip {
#if IS_ENABLED(CONFIG_HSU_DMA)
/* Export to the internal users */
-irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 *status);
+irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 status);
/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
int hsu_dma_remove(struct hsu_dma_chip *chip);
#else
-static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip,
- unsigned short nr)
+static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
+ unsigned short nr, u32 *status)
+{
+ return 0;
+}
+static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
+ unsigned short nr, u32 status)
{
return IRQ_NONE;
}
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index d6b3c9943..002611c85 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -51,7 +51,7 @@
#endif
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.6"
+#define REL_VERSION "8.4.7"
#define API_VERSION 1
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 101
@@ -370,6 +370,14 @@ enum drbd_notification_type {
NOTIFY_FLAGS = NOTIFY_CONTINUES,
};
+enum drbd_peer_state {
+ P_INCONSISTENT = 3,
+ P_OUTDATED = 4,
+ P_DOWN = 5,
+ P_PRIMARY = 6,
+ P_FENCING = 7,
+};
+
#define UUID_JUST_CREATED ((__u64)4)
enum write_ordering_e {
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 2d0e5ad5d..c934d3a96 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -123,15 +123,16 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
__u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF)
__u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF)
__u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF)
+ __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
+ __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
+ __u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF)
__flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF)
__flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF)
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
- __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
- __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
- /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
+ __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
)
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 8ac8c5d9a..ddac68422 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -126,8 +126,7 @@
#define DRBD_RESYNC_RATE_DEF 250
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
- /* less than 7 would hit performance unnecessarily. */
-#define DRBD_AL_EXTENTS_MIN 7
+#define DRBD_AL_EXTENTS_MIN 67
/* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer,
* this is the maximum you can use: */
@@ -210,6 +209,12 @@
#define DRBD_MD_FLUSHES_DEF 1
#define DRBD_TCP_CORK_DEF 1
#define DRBD_AL_UPDATES_DEF 1
+/* We used to ignore the discard_zeroes_data setting.
+ * To preserve established (and expected) behaviour, by default assume
+ * that discard_zeroes_data=0 can be treated as an effective
+ * discard_zeroes_data=1, provided we explicitly zero out unaligned
+ * partial chunks. */
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
#define DRBD_ALWAYS_ASBP_DEF 0
@@ -230,4 +235,10 @@
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
+
+#define DRBD_RS_DISCARD_GRANULARITY_MIN 0
+#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */
+#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */
+#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */
+
#endif
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4f1bbc68c..546d68057 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,6 +1,10 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+#endif
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -33,6 +37,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
+#ifdef HAVE_JUMP_LABEL
+ union {
+ struct static_key_true dd_key_true;
+ struct static_key_false dd_key_false;
+ } key;
+#endif
} __attribute__((aligned(8)));
@@ -60,7 +70,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
const struct net_device *dev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
@@ -68,13 +78,51 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
- .flags = _DPRINTK_FLAGS_DEFAULT, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ dd_key_init(key, init) \
}
+#ifdef HAVE_JUMP_LABEL
+
+#define dd_key_init(key, init) key = (init)
+
+#ifdef DEBUG
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \
+ (STATIC_KEY_TRUE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_likely(&descriptor.key.dd_key_true)
+#else
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \
+ (STATIC_KEY_FALSE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_unlikely(&descriptor.key.dd_key_false)
+#endif
+
+#else
+
+#define dd_key_init(key, init)
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0)
+
+#ifdef DEBUG
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ likely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#else
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#endif
+
+#endif
+
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
@@ -82,7 +130,7 @@ do { \
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -90,7 +138,7 @@ do { \
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -100,7 +148,7 @@ do { \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
__builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
print_hex_dump(KERN_DEBUG, prefix_str, \
prefix_type, rowsize, groupsize, \
buf, len, ascii); \
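With jump labels available, the enabled check above compiles down to a
runtime-patched branch instead of a load-and-test of descriptor.flags.
A rough expansion of dynamic_pr_debug() in the !DEBUG, HAVE_JUMP_LABEL
case, purely for illustration:

	do {
		DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "fmt\n");
		/* out-of-line no-op until the callsite is enabled via
		 * the dynamic_debug control file, which flips the key */
		if (static_branch_unlikely(&descriptor.key.dd_key_false))
			__dynamic_pr_debug(&descriptor, pr_fmt("fmt\n"));
	} while (0);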
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 17fd2c5bf..0148a3046 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -545,116 +545,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
void efi_native_runtime_setup(void);
/*
- * EFI Configuration Table and GUID definitions
+ * EFI Configuration Table and GUID definitions
+ *
+ * These are all defined in a single line to make them easier to
+ * grep for and to see them at a glance - while still having a
+ * similar structure to the definitions in the spec.
+ *
+ * Here's how they are structured:
+ *
+ * GUID: 12345678-1234-1234-1234-123456789012
+ * Spec:
+ * #define EFI_SOME_PROTOCOL_GUID \
+ * {0x12345678,0x1234,0x1234,\
+ * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}}
+ * Here:
+ * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12)
+ * ^ tabs ^extra space
+ *
+ * Note that the 'extra space' separates the values at the same place
+ * where the UEFI SPEC breaks the line.
*/
-#define NULL_GUID \
- EFI_GUID(0x00000000, 0x0000, 0x0000, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-
-#define MPS_TABLE_GUID \
- EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \
- 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define ACPI_TABLE_GUID \
- EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \
- 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define ACPI_20_TABLE_GUID \
- EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \
- 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-
-#define SMBIOS_TABLE_GUID \
- EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \
- 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define SMBIOS3_TABLE_GUID \
- EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \
- 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-
-#define SAL_SYSTEM_TABLE_GUID \
- EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
- 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define HCDP_TABLE_GUID \
- EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
- 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
-
-#define UGA_IO_PROTOCOL_GUID \
- EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
- 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
-
-#define EFI_GLOBAL_VARIABLE_GUID \
- EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
- 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
-
-#define UV_SYSTEM_TABLE_GUID \
- EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
- 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
-
-#define LINUX_EFI_CRASH_GUID \
- EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
- 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
-
-#define LOADED_IMAGE_PROTOCOL_GUID \
- EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
- 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
- EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
- 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-
-#define EFI_UGA_PROTOCOL_GUID \
- EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
- 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
-
-#define EFI_PCI_IO_PROTOCOL_GUID \
- EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
- 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
-
-#define EFI_FILE_INFO_ID \
- EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
- 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
- EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
- 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
-
-#define EFI_FILE_SYSTEM_GUID \
- EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
- 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define DEVICE_TREE_GUID \
- EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
- 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-
-#define EFI_PROPERTIES_TABLE_GUID \
- EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
- 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
-
-#define EFI_RNG_PROTOCOL_GUID \
- EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
- 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
-
-#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
- EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
- 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
-
-#define EFI_CONSOLE_OUT_DEVICE_GUID \
- EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
- 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
+#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
+#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
+#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
+#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
+#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
+#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
+#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
+#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
+#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
+#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
+#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
+#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
+#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
* associated with ConOut
*/
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
- EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
- 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
-
-#define LINUX_EFI_LOADER_ENTRY_GUID \
- EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
- 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
typedef struct {
efi_guid_t guid;
@@ -946,7 +888,7 @@ extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec *ts);
+extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
extern void efi_late_init(void);
@@ -984,7 +926,6 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct kobject *efi_kobj;
@@ -1470,6 +1411,56 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
unsigned long size);
bool efi_runtime_disabled(void);
+extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt():
+ *
+ * * arch_efi_call_virt_setup()
+ *
+ * Sets up the environment for the call (e.g. switching page tables,
+ * allowing kernel-mode use of floating point, if required).
+ *
+ * * arch_efi_call_virt()
+ *
+ * Performs the call. The last expression in the macro must be the call
+ * itself, allowing the logic to be shared by the void and non-void
+ * cases.
+ *
+ * * arch_efi_call_virt_teardown()
+ *
+ * Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt_pointer(p, f, args...) \
+({ \
+ efi_status_t __s; \
+ unsigned long __flags; \
+ \
+ arch_efi_call_virt_setup(); \
+ \
+ local_save_flags(__flags); \
+ __s = arch_efi_call_virt(p, f, args); \
+ efi_call_virt_check_flags(__flags, __stringify(f)); \
+ \
+ arch_efi_call_virt_teardown(); \
+ \
+ __s; \
+})
+
+#define __efi_call_virt_pointer(p, f, args...) \
+({ \
+ unsigned long __flags; \
+ \
+ arch_efi_call_virt_setup(); \
+ \
+ local_save_flags(__flags); \
+ arch_efi_call_virt(p, f, args); \
+ efi_call_virt_check_flags(__flags, __stringify(f)); \
+ \
+ arch_efi_call_virt_teardown(); \
+})
typedef efi_status_t (*efi_exit_boot_map_processing)(
efi_system_table_t *sys_table_arg,
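An architecture adopting efi_call_virt_pointer() only has to provide the
three template macros; a hedged sketch of a trivial port (the virtmap
helpers are placeholders for whatever environment switching the arch
really needs, e.g. page tables or FPU state):

	/* Illustrative only; not taken from any real architecture. */
	#define arch_efi_call_virt_setup()	example_efi_virtmap_load()
	#define arch_efi_call_virt(p, f, args...)	((p)->f(args))
	#define arch_efi_call_virt_teardown()	example_efi_virtmap_unload()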
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 638b324f0..e7f358d2e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *,
typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
-typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
+typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
+ struct request *, struct bio *);
+
+typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
+ struct request *, struct request *);
typedef void (elevator_bio_merged_fn) (struct request_queue *,
struct request *, struct bio *);
@@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -46,7 +50,8 @@ struct elevator_ops
elevator_merge_fn *elevator_merge_fn;
elevator_merged_fn *elevator_merged_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_merge_fn *elevator_allow_merge_fn;
+ elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
+ elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
elevator_bio_merged_fn *elevator_bio_merged_fn;
elevator_dispatch_fn *elevator_dispatch_fn;
@@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int);
+extern int elv_may_queue(struct request_queue *, int, int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask);
@@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
extern int elevator_change(struct request_queue *, const char *);
-extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);
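With the split, an I/O scheduler can now veto bio merges and request
merges independently; a hedged sketch of the two hooks (placeholder
bodies, illustrative names):

	static int example_allow_bio_merge(struct request_queue *q,
					   struct request *rq, struct bio *bio)
	{
		return 1;	/* placeholder: always allow bio merges */
	}

	static int example_allow_rq_merge(struct request_queue *q,
					  struct request *rq,
					  struct request *next)
	{
		return 1;	/* placeholder: always allow rq merges */
	}

	/* wired up via struct elevator_ops:
	 *	.elevator_allow_bio_merge_fn = example_allow_bio_merge,
	 *	.elevator_allow_rq_merge_fn  = example_allow_rq_merge,
	 */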
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 37ff4a6fa..6fec9e81b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -374,6 +374,29 @@ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
}
/**
+ * ether_addr_equal_masked - Compare two Ethernet addresses with a mask
+ * @addr1: Pointer to a six-byte array containing the 1st Ethernet address
+ * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address
+ * @mask: Pointer to a six-byte array containing the Ethernet address bitmask
+ *
+ * Compare two Ethernet addresses with a mask; return true if, for every
+ * bit set in the bitmask, the corresponding bits in the two Ethernet
+ * addresses are equal. Using a mask with all bits set is a slower
+ * ether_addr_equal().
+ */
+static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
+ const u8 *mask)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ if ((addr1[i] ^ addr2[i]) & mask[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
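A short usage sketch for the new helper: matching only the OUI (first
three octets) of an address; the mask value and the handler are
illustrative:

	static const u8 oui_mask[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0x00, 0x00, 0x00
	};

	/* true when addr1 and addr2 share their first three octets */
	if (ether_addr_equal_masked(addr1, addr2, oui_mask))
		handle_same_vendor(addr1);	/* hypothetical handler */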
diff --git a/include/linux/export.h b/include/linux/export.h
index 2f9ccbe6a..c565f87f0 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -82,7 +82,7 @@ extern struct module __this_module;
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, sec) \
- __cond_export_sym(sym, sec, config_enabled(__KSYM_##sym))
+ __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, sec, conf) \
___cond_export_sym(sym, sec, conf)
#define ___cond_export_sym(sym, sec, enabled) \
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index d8414502e..b03c0625f 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -6,6 +6,7 @@
struct dentry;
struct iattr;
struct inode;
+struct iomap;
struct super_block;
struct vfsmount;
@@ -187,21 +188,6 @@ struct fid {
* get_name is not (which is possibly inconsistent)
*/
-/* types of block ranges for multipage write mappings. */
-#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
-#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
-
-#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
-
-struct iomap {
- sector_t blkno; /* first sector of mapping */
- loff_t offset; /* file offset of mapping, bytes */
- u64 length; /* length of mapping, bytes */
- int type; /* type of mapping */
-};
-
struct export_operations {
int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
struct inode *parent);
diff --git a/include/linux/extable.h b/include/linux/extable.h
new file mode 100644
index 000000000..7effea4b2
--- /dev/null
+++ b/include/linux/extable.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_EXTABLE_H
+#define _LINUX_EXTABLE_H
+
+#include <linux/stddef.h> /* for NULL */
+
+struct module;
+struct exception_table_entry;
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value);
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+void sort_main_extable(void);
+void trim_init_extable(struct module *m);
+
+/* Given an address, look for it in the exception tables */
+const struct exception_table_entry *search_exception_tables(unsigned long addr);
+
+#ifdef CONFIG_MODULES
+/* For extable.c to search modules' exception tables. */
+const struct exception_table_entry *search_module_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_module_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif /*CONFIG_MODULES*/
+
+#endif /* _LINUX_EXTABLE_H */
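The usual consumer of this interface is an architecture's fault fixup
path; a hedged sketch (the register access and the fixup-address
accessor are arch-specific and only illustrative here):

	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		/* redirect execution to the fixup handler; how the
		 * entry encodes its target is arch-specific */
		regs->ip = example_ex_fixup_addr(fixup);
		return 1;
	}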
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7abf674c3..61004413d 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -126,42 +126,6 @@ struct extcon_dev {
struct device_attribute *d_attrs_muex;
};
-/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev: The extcon device
- * @cable_index: Index of this cable in the edev
- * @attr_g: Attribute group for the cable
- * @attr_name: "name" sysfs entry
- * @attr_state: "state" sysfs entry
- * @attrs: Array pointing to attr_name and attr_state for attr_g
- */
-struct extcon_cable {
- struct extcon_dev *edev;
- int cable_index;
-
- struct attribute_group attr_g;
- struct device_attribute attr_name;
- struct device_attribute attr_state;
-
- struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
-};
-
-/**
- * struct extcon_specific_cable_nb - An internal data for
- * extcon_register_interest().
- * @user_nb: user provided notifier block for events from
- * a specific cable.
- * @cable_index: the target cable.
- * @edev: the target extcon device.
- * @previous_value: the saved previous event value.
- */
-struct extcon_specific_cable_nb {
- struct notifier_block *user_nb;
- int cable_index;
- struct extcon_dev *edev;
- unsigned long previous_value;
-};
-
#if IS_ENABLED(CONFIG_EXTCON)
/*
@@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
/*
* get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable_name.
+ * They are used to access the status of each cable based on the cable id.
*/
extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
bool cable_state);
-extern int extcon_get_cable_state(struct extcon_dev *edev,
- const char *cable_name);
-extern int extcon_set_cable_state(struct extcon_dev *edev,
- const char *cable_name, bool cable_state);
-
-/*
- * Following APIs are for notifiees (those who want to be notified)
- * to register a callback for events from a specific cable of the extcon.
- * Notifiees are the connected device drivers wanting to get notified by
- * a specific external port of a connection device.
- */
-extern int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name,
- const char *cable_name,
- struct notifier_block *nb);
-extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
-
/*
* Following APIs are to monitor every action of a notifier.
* Registrar gets notified for every external port of a connection device.
@@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int devm_extcon_register_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+extern void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
/*
* Following API get the extcon device from devicetree.
@@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
/* Following API to get information of extcon device */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev,
return 0;
}
-static inline int extcon_get_cable_state(struct extcon_dev *edev,
- const char *cable_name)
-{
- return 0;
-}
-
-static inline int extcon_set_cable_state(struct extcon_dev *edev,
- const char *cable_name, int state)
-{
- return 0;
-}
-
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return NULL;
@@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev,
return 0;
}
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name,
- const char *cable_name,
- struct notifier_block *nb)
+static inline int devm_extcon_register_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
{
- return 0;
+ return -ENOSYS;
}
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
- *obj)
-{
- return 0;
-}
+static inline void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb) { }
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index)
@@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_EXTCON */
+
+/*
+ * The following structure and APIs are deprecated. Extcon keeps these
+ * definitions only to prevent build breakage for existing users.
+ */
+struct extcon_specific_cable_nb {
+ struct notifier_block *user_nb;
+ int cable_index;
+ struct extcon_dev *edev;
+ unsigned long previous_value;
+};
+
+static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
+ const char *extcon_name, const char *cable_name,
+ struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
+ *obj)
+{
+ return -EINVAL;
+}
#endif /* __LINUX_EXTCON_H__ */
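With the devm_ variants a consumer driver can drop its remove-path
cleanup; a hedged probe sketch (EXTCON_USB comes from the extcon
connector IDs, and the notifier block example_nb is assumed to exist):

	static int example_probe(struct platform_device *pdev)
	{
		struct extcon_dev *edev;

		edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
		if (IS_ERR(edev))
			return PTR_ERR(edev);

		/* unregistered automatically when the device unbinds */
		return devm_extcon_register_notifier(&pdev->dev, edev,
						     EXTCON_USB, &example_nb);
	}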
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 53c60806b..ac85f2061 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -53,6 +53,7 @@ struct adc_jack_cond {
* milli-seconds after the interrupt occurs. You may
* describe such delays with @handling_delay_ms, which
* is rounded-off by jiffies.
+ * @wakeup_source: flag to wake up the system for extcon events.
*/
struct adc_jack_pdata {
const char *name;
@@ -65,6 +66,7 @@ struct adc_jack_pdata {
unsigned long irq_flags;
unsigned long handling_delay_ms; /* in ms */
+ bool wakeup_source;
};
#endif /* _EXTCON_ADC_JACK_H */
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644
index 000000000..86baaa455
--- /dev/null
+++ b/include/linux/fence-array.h
@@ -0,0 +1,73 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_ARRAY_H
+#define __LINUX_FENCE_ARRAY_H
+
+#include <linux/fence.h>
+
+/**
+ * struct fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct fence_array_cb {
+ struct fence_cb cb;
+ struct fence_array *array;
+};
+
+/**
+ * struct fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct fence_array {
+ struct fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct fence **fences;
+};
+
+extern const struct fence_ops fence_array_ops;
+
+/**
+ * to_fence_array - cast a fence to a fence_array
+ * @fence: fence to cast to a fence_array
+ *
+ * Returns NULL if the fence is not a fence_array,
+ * or the fence_array otherwise.
+ */
+static inline struct fence_array *to_fence_array(struct fence *fence)
+{
+ if (fence->ops != &fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct fence_array, base);
+}
+
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_FENCE_ARRAY_H */
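A short usage sketch: aggregating two fences into one that signals when
both have signalled (fence_a/fence_b are assumed to come from elsewhere;
the u64 fence_context_alloc() is per the sibling fence.h change below):

	struct fence *fences[2] = { fence_a, fence_b };
	struct fence_array *array;

	/* signal_on_any=false: signal only once every fence has */
	array = fence_array_create(2, fences, fence_context_alloc(1),
				   1, false);
	if (!array)
		return -ENOMEM;

	fence_wait(&array->base, false);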
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2056e9fd0..2ac6fa5f4 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,8 +49,6 @@ struct fence_cb;
* @timestamp: Timestamp when the fence was signaled.
* @status: Optional, only valid if < 0, must be set before calling
* fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
*
* the flags member must be manipulated and read using the appropriate
* atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -77,12 +75,11 @@ struct fence {
struct rcu_head rcu;
struct list_head cb_list;
spinlock_t *lock;
- unsigned context, seqno;
+ u64 context;
+ unsigned seqno;
unsigned long flags;
ktime_t timestamp;
int status;
- struct list_head child_list;
- struct list_head active_list;
};
enum fence_flag_bits {
@@ -180,7 +177,7 @@ struct fence_ops {
};
void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno);
+ spinlock_t *lock, u64 context, unsigned seqno);
void fence_release(struct kref *kref);
void fence_free(struct fence *fence);
@@ -354,27 +351,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
#define FENCE_TRACE(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- if (config_enabled(CONFIG_FENCE_TRACE)) \
- pr_info("f %u#%u: " fmt, \
+ if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
__ff->context, __ff->seqno, ##args); \
} while (0)
#define FENCE_WARN(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
#define FENCE_ERR(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8f74f3d61..a16439b99 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -368,6 +368,11 @@ struct bpf_skb_data_end {
void *data_end;
};
+struct xdp_buff {
+ void *data;
+ void *data_end;
+};
+
/* compute the linear packet data range [data, data_end) which
* will be accessed by cls_bpf and act_bpf programs
*/
@@ -429,6 +434,18 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return BPF_PROG_RUN(prog, skb);
}
+static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 ret;
+
+ rcu_read_lock();
+ ret = BPF_PROG_RUN(prog, (void *)xdp);
+ rcu_read_unlock();
+
+ return ret;
+}
+
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -513,6 +530,7 @@ bool bpf_helper_changes_skb_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
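A hedged sketch of how a driver RX path would consult an attached XDP
program through the new helper; the XDP_* verdicts live in the BPF uapi
header, and the frame variables and recycle helper are hypothetical:

	struct xdp_buff xdp;
	u32 act;

	xdp.data = frame_start;
	xdp.data_end = frame_start + frame_len;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	switch (act) {
	case XDP_PASS:
		break;				/* hand to the stack */
	case XDP_DROP:
		example_recycle_rx_buffer(rxq);	/* hypothetical */
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		break;
	}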
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 92156e687..9177e4b91 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -47,6 +47,8 @@ int request_firmware_nowait(
void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
+int request_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device, void *buf, size_t size);
void release_firmware(const struct firmware *fw);
#else
@@ -75,6 +77,12 @@ static inline int request_firmware_direct(const struct firmware **fw,
return -EINVAL;
}
+static inline int request_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device, void *buf, size_t size)
+{
+ return -EINVAL;
+}
+
#endif
#ifndef _LINUX_LIBRE_FIRMWARE_H
#define _LINUX_LIBRE_FIRMWARE_H
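Usage mirrors request_firmware(), but the image is loaded into a
caller-supplied buffer; a minimal sketch (the firmware name, buffer
sizing and load helper are illustrative):

	const struct firmware *fw;
	int ret;

	ret = request_firmware_into_buf(&fw, "example-fw.bin", dev,
					buf, buf_size);
	if (!ret) {
		/* fw->data points into buf; fw->size is the real length */
		example_load(dev, fw->data, fw->size);
		release_firmware(fw);
	}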
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index e65ef9595..c46d2aa16 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -4,6 +4,7 @@
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/bitops.h>
+#include <linux/jump_label.h>
struct frontswap_ops {
void (*init)(unsigned); /* this swap type was just swapon'ed */
@@ -14,7 +15,6 @@ struct frontswap_ops {
struct frontswap_ops *next; /* private pointer to next ops */
};
-extern bool frontswap_enabled;
extern void frontswap_register_ops(struct frontswap_ops *ops);
extern void frontswap_shrink(unsigned long);
extern unsigned long frontswap_curr_pages(void);
@@ -30,7 +30,12 @@ extern void __frontswap_invalidate_page(unsigned, pgoff_t);
extern void __frontswap_invalidate_area(unsigned);
#ifdef CONFIG_FRONTSWAP
-#define frontswap_enabled (1)
+extern struct static_key_false frontswap_enabled_key;
+
+static inline bool frontswap_enabled(void)
+{
+ return static_branch_unlikely(&frontswap_enabled_key);
+}
static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
{
@@ -50,7 +55,10 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
#else
/* all inline routines become no-ops and all externs are ignored */
-#define frontswap_enabled (0)
+static inline bool frontswap_enabled(void)
+{
+ return false;
+}
static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
{
@@ -70,37 +78,35 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
static inline int frontswap_store(struct page *page)
{
- int ret = -1;
+ if (frontswap_enabled())
+ return __frontswap_store(page);
- if (frontswap_enabled)
- ret = __frontswap_store(page);
- return ret;
+ return -1;
}
static inline int frontswap_load(struct page *page)
{
- int ret = -1;
+ if (frontswap_enabled())
+ return __frontswap_load(page);
- if (frontswap_enabled)
- ret = __frontswap_load(page);
- return ret;
+ return -1;
}
static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
- if (frontswap_enabled)
+ if (frontswap_enabled())
__frontswap_invalidate_page(type, offset);
}
static inline void frontswap_invalidate_area(unsigned type)
{
- if (frontswap_enabled)
+ if (frontswap_enabled())
__frontswap_invalidate_area(type);
}
static inline void frontswap_init(unsigned type, unsigned long *map)
{
- if (frontswap_enabled)
+ if (frontswap_enabled())
__frontswap_init(type, map);
}
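The conversion above follows the generic static-branch pattern; a self-contained sketch of that pattern (the example_* names are assumptions):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(example_feature_key);

	static inline bool example_feature_enabled(void)
	{
		/* Compiles to a patched NOP/jump rather than a memory load,
		 * so the disabled case costs nothing on the hot path. */
		return static_branch_unlikely(&example_feature_key);
	}

	static void example_register_backend(void)
	{
		static_branch_inc(&example_feature_key);	/* flips the branch sites */
	}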
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fced11a8b..3b3703885 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -152,9 +152,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define CHECK_IOVEC_ONLY -1
/*
- * The below are the various read and write types that we support. Some of
+ * The below are the various read and write flags that we support. Some of
* them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. Terminology:
+ * block layer and IO scheduler. They should be used along with a req_op.
+ * Terminology:
*
* The block layer uses device plugging to defer IO a little bit, in
* the hope that we will see more IO very shortly. This increases
@@ -177,9 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* READ_SYNC A synchronous read. Device is not plugged, caller can
* immediately wait on this read without caring about
* unplugging.
- * READA Used for read-ahead operations. Lower priority, and the
- * block layer could (in theory) choose to ignore this
- * request if it runs into resource problems.
* WRITE A normal async write. Device will be plugged.
* WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
* the hint that someone will be waiting on this IO
@@ -194,22 +192,19 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* WRITE_BG Background write. This is for background activity like
* the periodic flush and background threshold writeback
*
- *
*/
-#define RW_MASK REQ_WRITE
-#define RWA_MASK REQ_RAHEAD
+#define RW_MASK REQ_OP_WRITE
-#define READ 0
-#define WRITE RW_MASK
-#define READA RWA_MASK
+#define READ REQ_OP_READ
+#define WRITE REQ_OP_WRITE
-#define READ_SYNC (READ | REQ_SYNC)
-#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
-#define WRITE_ODIRECT (WRITE | REQ_SYNC)
-#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
-#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
-#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
-#define WRITE_BG (WRITE | REQ_NOIDLE | REQ_BG)
+#define READ_SYNC REQ_SYNC
+#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT REQ_SYNC
+#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
+#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
+#define WRITE_BG (REQ_NOIDLE | REQ_BG)
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -406,6 +401,8 @@ struct address_space_operations {
*/
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ void (*putback_page)(struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
@@ -463,7 +460,6 @@ struct block_device {
struct inode * bd_inode; /* will die */
struct super_block * bd_super;
struct mutex bd_mutex; /* open/close mutex */
- struct list_head bd_inodes;
void * bd_claiming;
void * bd_holder;
int bd_holders;
@@ -581,6 +577,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
uncached_acl_sentinel(struct task_struct *task)
@@ -669,6 +666,7 @@ struct inode {
#endif
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
+ struct list_head i_wb_list; /* backing dev writeback list */
union {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
@@ -835,31 +833,6 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
#endif
}
-/* Helper functions so that in most cases filesystems will
- * not need to deal directly with kuid_t and kgid_t and can
- * instead deal with the raw numeric values that are stored
- * in the filesystem.
- */
-static inline uid_t i_uid_read(const struct inode *inode)
-{
- return from_kuid(&init_user_ns, inode->i_uid);
-}
-
-static inline gid_t i_gid_read(const struct inode *inode)
-{
- return from_kgid(&init_user_ns, inode->i_gid);
-}
-
-static inline void i_uid_write(struct inode *inode, uid_t uid)
-{
- inode->i_uid = make_kuid(&init_user_ns, uid);
-}
-
-static inline void i_gid_write(struct inode *inode, gid_t gid)
-{
- inode->i_gid = make_kgid(&init_user_ns, gid);
-}
-
static inline unsigned iminor(const struct inode *inode)
{
return MINOR(inode->i_rdev);
@@ -1276,12 +1249,7 @@ static inline struct inode *file_inode(const struct file *f)
static inline struct dentry *file_dentry(const struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
-
- if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, file_inode(file));
- else
- return dentry;
+ return d_real(file->f_path.dentry, file_inode(file), 0);
}
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
@@ -1329,11 +1297,13 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-extern struct list_head super_blocks;
-
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+
+/* sb->s_iflags to limit user namespace mounts */
+#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
/* Possible states of 'frozen' field */
enum {
@@ -1437,6 +1407,13 @@ struct super_block {
struct hlist_head s_pins;
/*
+ * Owning user namespace and default context in which to
+ * interpret filesystem uids, gids, quotas, device nodes,
+ * xattrs and security labels.
+ */
+ struct user_namespace *s_user_ns;
+
+ /*
* Keep the lru lists last in the structure so they always sit on their
* own individual cachelines.
*/
@@ -1455,8 +1432,36 @@ struct super_block {
/* s_inode_list_lock protects s_inodes */
spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
struct list_head s_inodes; /* all inodes */
+
+ spinlock_t s_inode_wblist_lock;
+ struct list_head s_inodes_wb; /* writeback inodes */
};
+/* Helper functions so that in most cases filesystems will
+ * not need to deal directly with kuid_t and kgid_t and can
+ * instead deal with the raw numeric values that are stored
+ * in the filesystem.
+ */
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+ return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+ return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+ inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+ inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
+}
+
extern struct timespec current_fs_time(struct super_block *sb);
/*
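To illustrate what the relocated helpers now do, a sketch of a filesystem filling in ownership from raw on-disk values (raw_uid/raw_gid stand in for whatever the disk format actually stores):

	static void example_read_owner(struct inode *inode, u32 raw_uid, u32 raw_gid)
	{
		/* Mapped through the superblock's owning namespace rather than
		 * init_user_ns, so ids appear shifted inside a userns mount. */
		i_uid_write(inode, raw_uid);
		i_gid_write(inode, raw_gid);

		/* Ids with no mapping in s_user_ns become invalid and are
		 * caught later by HAS_UNMAPPED_ID(). */
	}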
@@ -1599,6 +1604,7 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
*/
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
umode_t mode);
+extern bool may_open_dev(const struct path *path);
/*
* VFS FS_IOC_FIEMAP helper definitions.
*/
@@ -1834,8 +1840,6 @@ struct super_operations {
#else
#define S_DAX 0 /* Make all the DAX code disappear */
#endif
-#define S_ATOMIC_COPY 16384 /* Pages mapped with this inode need to be
- atomically copied (gem) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1878,6 +1882,11 @@ struct super_operations {
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+static inline bool HAS_UNMAPPED_ID(struct inode *inode)
+{
+ return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
+}
+
/*
* Inode state bits. Protected by inode->i_lock
*
@@ -2026,8 +2035,6 @@ struct file_system_type {
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
-#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
-#define FS_USERNS_VISIBLE 32 /* FS must already be visible */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
@@ -2048,8 +2055,9 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
- void *data, int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_ns(struct file_system_type *fs_type,
+ int flags, void *data, void *ns, struct user_namespace *user_ns,
+ int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2069,6 +2077,11 @@ void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
+struct super_block *sget_userns(struct file_system_type *type,
+ int (*test)(struct super_block *,void *),
+ int (*set)(struct super_block *,void *),
+ int flags, struct user_namespace *user_ns,
+ void *data);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
@@ -2369,13 +2382,6 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-extern int fsync_super(struct super_block *);
-extern int fsync_no_super(struct block_device *);
-#define FS_FREEZER_FUSE 1
-#define FS_FREEZER_NORMAL 2
-#define FS_FREEZER_ALL (FS_FREEZER_FUSE | FS_FREEZER_NORMAL)
-void freeze_filesystems(int which);
-void thaw_filesystems(int which);
extern struct super_block *blockdev_superblock;
@@ -2488,15 +2494,18 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
-/*
- * return READ, READA, or WRITE
- */
-#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
+static inline bool op_is_write(unsigned int op)
+{
+ return op != REQ_OP_READ;
+}
/*
* return data direction, READ or WRITE
*/
-#define bio_data_dir(bio) ((bio)->bi_rw & 1)
+static inline int bio_data_dir(struct bio *bio)
+{
+ return op_is_write(bio_op(bio)) ? WRITE : READ;
+}
extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
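A small sketch of the new helpers from a driver's point of view (the accounting function is an assumption):

	static void example_account_bio(struct bio *bio)
	{
		/* bio_op() extracts the REQ_OP_* value; op_is_write()
		 * classifies everything but a plain read as a write. */
		if (op_is_write(bio_op(bio)))
			pr_debug("write, %u bytes\n", bio->bi_iter.bi_size);
		else
			pr_debug("read, %u bytes\n", bio->bi_iter.bi_size);
	}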
@@ -2531,6 +2540,7 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
+extern int filemap_check_errors(struct address_space *mapping);
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
@@ -2655,6 +2665,7 @@ extern int do_pipe_flags(int *, int);
#define __kernel_read_file_id(id) \
id(UNKNOWN, unknown) \
id(FIRMWARE, firmware) \
+ id(FIRMWARE_PREALLOC_BUFFER, firmware) \
id(MODULE, kernel-module) \
id(KEXEC_IMAGE, kexec-image) \
id(KEXEC_INITRAMFS, kexec-initramfs) \
@@ -2749,11 +2760,6 @@ extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
extern int file_remove_privs(struct file *);
-extern int dentry_needs_remove_privs(struct dentry *dentry);
-static inline int file_needs_remove_privs(struct file *file)
-{
- return dentry_needs_remove_privs(file->f_path.dentry);
-}
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -2771,7 +2777,7 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
-extern blk_qc_t submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);
@@ -2826,7 +2832,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
-typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
+typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
loff_t file_offset);
enum {
@@ -3198,7 +3204,4 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
-extern void take_super_lock(void);
-extern void release_super_lock(void);
-
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cfa6cde25..76cff18bb 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct inode *,
- const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
const struct fscrypt_policy *p)
{
return -EOPNOTSUPP;
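Sketch of the corresponding call-site change in a filesystem ioctl handler (the handler itself is an assumed skeleton): the policy is now validated against the opener's struct file rather than a bare inode.

	static long example_set_policy_ioctl(struct file *filp, unsigned long arg)
	{
		struct fscrypt_policy policy;

		if (copy_from_user(&policy, (void __user *)arg, sizeof(policy)))
			return -EFAULT;

		return fscrypt_process_policy(filp, &policy);
	}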
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 0141f257d..eed9e853a 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -52,18 +52,6 @@ static inline int fsnotify_perm(struct file *file, int mask)
}
/*
- * fsnotify_d_move - dentry has been moved
- */
-static inline void fsnotify_d_move(struct dentry *dentry)
-{
- /*
- * On move we need to update dentry->d_flags to indicate if the new parent
- * cares about events from this dentry.
- */
- __fsnotify_update_dcache_flags(dentry);
-}
-
-/*
* fsnotify_link_count - inode's link count changed
*/
static inline void fsnotify_link_count(struct inode *inode)
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 16af670a9..7268ed076 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -267,10 +267,8 @@ static inline int fsnotify_inode_watches_children(struct inode *inode)
* Update the dentry with a flag indicating the interest of its parent to receive
* filesystem events when those events happens to this dentry->d_inode.
*/
-static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+static inline void fsnotify_update_flags(struct dentry *dentry)
{
- struct dentry *parent;
-
assert_spin_locked(&dentry->d_lock);
/*
@@ -280,21 +278,12 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
- parent = dentry->d_parent;
- if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
+ if (fsnotify_inode_watches_children(dentry->d_parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
}
-/*
- * fsnotify_d_instantiate - instantiate a dentry for inode
- */
-static inline void __fsnotify_d_instantiate(struct dentry *dentry)
-{
- __fsnotify_update_dcache_flags(dentry);
-}
-
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
@@ -386,10 +375,7 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
-static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
-{}
-
-static inline void __fsnotify_d_instantiate(struct dentry *dentry)
+static inline void fsnotify_update_flags(struct dentry *dentry)
{}
static inline u32 fsnotify_get_cookie(void)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 66a36a815..7d565afe3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -754,23 +754,27 @@ static inline void ftrace_init(void) { }
/*
* Structure that defines an entry function trace.
+ * It's already packed but the attribute "packed" is needed
+ * to remove extra padding at the end.
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
int depth;
-};
+} __packed;
/*
* Structure that defines a return function trace.
+ * It's already packed but the attribute "packed" is needed
+ * to remove extra padding at the end.
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
- unsigned long long calltime;
- unsigned long long rettime;
/* Number of functions that overran the depth limit for current task */
unsigned long overrun;
+ unsigned long long calltime;
+ unsigned long long rettime;
int depth;
-};
+} __packed;
/* Type of the callback handlers for tracing function graph*/
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
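Why the attribute matters, as a sketch (sizes assume an LP64 ABI where unsigned long is 8 bytes and int is 4):

	struct ent_padded {		/* sizeof == 16: 4 bytes of tail padding */
		unsigned long func;
		int depth;
	};

	struct ent_packed {		/* sizeof == 12: tail padding removed */
		unsigned long func;
		int depth;
	} __packed;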
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 359a8e4bd..1dbf52f9c 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -205,7 +205,6 @@ struct gendisk {
void *private_data;
int flags;
- struct device *driverfs_dev; // FIXME: remove
struct kobject *slave_dir;
struct timer_rand_state *random;
@@ -414,7 +413,12 @@ static inline void free_part_info(struct hd_struct *part)
extern void part_round_stats(int cpu, struct hd_struct *part);
/* block/genhd.c */
-extern void add_disk(struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk);
+static inline void add_disk(struct gendisk *disk)
+{
+ device_add_disk(NULL, disk);
+}
+
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
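Call-site sketch: setting the now-removed driverfs_dev field before add_disk() is replaced by passing the parent in one call (names are illustrative):

	static void example_register_disk(struct device *parent, struct gendisk *disk)
	{
		/* Previously:
		 *	disk->driverfs_dev = parent;
		 *	add_disk(disk);
		 */
		device_add_disk(parent, disk);
	}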
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 327c2d79a..f8041f9de 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,8 +40,7 @@ struct vm_area_struct;
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
-#define ___GFP_TOI_NOTRACK 0x2000000u
-#define ___GFP_KSWAPD_RECLAIM 0x4000000u
+#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -79,8 +78,7 @@ struct vm_area_struct;
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
* node with no fallbacks or placement policy enforcements.
*
- * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant
- * to kmem allocations).
+ * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
@@ -156,7 +154,6 @@ struct vm_area_struct;
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-#define __GFP_TOI_NOTRACK ((__force gfp_t)___GFP_TOI_NOTRACK) /* Allocator wants page untracked by TOI */
/*
* Action modifiers
@@ -190,7 +187,7 @@ struct vm_area_struct;
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 27
+#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -240,9 +237,11 @@ struct vm_area_struct;
* are expected to be movable via page reclaim or page migration. Typically,
* pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
*
- * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
- * that will fail quickly if memory is not available and will not wake
- * kswapd on failure.
+ * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
+ * compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is used by default
+ * in the page fault path, while the non-light version is used by khugepaged.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
@@ -257,9 +256,9 @@ struct vm_area_struct;
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
- ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -488,10 +487,6 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
-extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
-extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order);
-
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
@@ -515,9 +510,6 @@ extern void *__alloc_page_frag(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);
-extern void __free_kmem_pages(struct page *page, unsigned int order);
-extern void free_kmem_pages(unsigned long addr, unsigned int order);
-
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
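A sketch of how a caller chooses between the two THP masks (the helper is an assumption; HPAGE_PMD_ORDER requires CONFIG_TRANSPARENT_HUGEPAGE):

	static struct page *example_alloc_thp(bool in_fault_path)
	{
		/* The _LIGHT mask clears __GFP_DIRECT_RECLAIM, so the fault
		 * path fails fast instead of stalling in reclaim/compaction. */
		gfp_t gfp = in_fault_path ? GFP_TRANSHUGE_LIGHT : GFP_TRANSHUGE;

		return alloc_pages(gfp, HPAGE_PMD_ORDER);
	}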
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index d2ba7d334..1ffbf2a8c 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -304,6 +304,8 @@ struct tegra_mipi_device;
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_calibrate(struct tegra_mipi_device *device);
#endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index c98c6539e..5e00f80b1 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -494,4 +494,11 @@ extern void __init hrtimers_init(void);
/* Show pending timers: */
extern void sysrq_timer_list_show(void);
+int hrtimers_prepare_cpu(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int hrtimers_dead_cpu(unsigned int cpu);
+#else
+#define hrtimers_dead_cpu NULL
+#endif
+
#endif
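These symbols are shaped for the cpuhp state machine; the core wires them into its internal state table, but expressed as a dynamic registration the pairing would look roughly like this (a sketch only; CPUHP_HRTIMERS_PREPARE is assumed to be the matching state):

	#include <linux/cpuhotplug.h>

	static int __init example_wire_hrtimer_states(void)
	{
		/* startup on PREPARE, teardown once the CPU is dead */
		return cpuhp_setup_state(CPUHP_HRTIMERS_PREPARE, "hrtimers:prepare",
					 hrtimers_prepare_cpu, hrtimers_dead_cpu);
	}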
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 2790591c7..57402544b 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -246,7 +246,7 @@ struct hsi_port {
int (*stop_tx)(struct hsi_client *cl);
int (*release)(struct hsi_client *cl);
/* private */
- struct atomic_notifier_head n_head;
+ struct blocking_notifier_head n_head;
};
#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f0a7a0320..6f14de45b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,25 +1,17 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- unsigned int flags);
+extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- pmd_t orig_pmd, int dirty);
-extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- pmd_t orig_pmd);
+extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
+extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
unsigned int flags);
-extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
+extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
@@ -49,6 +41,18 @@ enum transparent_hugepage_flag {
#endif
};
+struct kobject;
+struct kobj_attribute;
+
+extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
+extern struct kobj_attribute shmem_enabled_attr;
+
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
@@ -134,8 +138,7 @@ static inline int hpage_nr_pages(struct page *page)
return 1;
}
-extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -152,6 +155,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);
+#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -161,6 +166,8 @@ void put_huge_zero_page(void);
#define transparent_hugepage_enabled(__vma) 0
+static inline void prep_transhuge_page(struct page *page) {}
+
#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
@@ -196,8 +203,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
{
return 0;
}
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 8f1b086ca..c2e3324f9 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -23,6 +23,8 @@
#define _LINUX_I2C_SMBUS_H
#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
/**
@@ -48,4 +50,31 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
+/**
+ * smbus_host_notify - internal structure used by the Host Notify mechanism.
+ * @adapter: the I2C adapter associated with this struct
+ * @work: worker used to schedule the IRQ in the slave device
+ * @lock: spinlock to check if a notification is already pending
+ * @pending: flag set when a notification is pending (any new notification will
+ * be rejected if pending is true)
+ * @payload: the actual payload of the Host Notify event
+ * @addr: the address of the slave device which raised the notification
+ *
+ * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does
+ * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a
+ * managed resource to clean this up when the adapter gets released.
+ */
+struct smbus_host_notify {
+ struct i2c_adapter *adapter;
+ struct work_struct work;
+ spinlock_t lock;
+ bool pending;
+ u16 payload;
+ u8 addr;
+};
+
+struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap);
+int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
+ unsigned short addr, unsigned int data);
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 96a25ae14..fffdc270c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -126,6 +126,11 @@ i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 command, u8 length, u8 *values);
#endif /* I2C */
+enum i2c_alert_protocol {
+ I2C_PROTOCOL_SMBUS_ALERT,
+ I2C_PROTOCOL_SMBUS_HOST_NOTIFY,
+};
+
/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
@@ -180,8 +185,11 @@ struct i2c_driver {
* The format and meaning of the data value depends on the protocol.
* For the SMBus alert protocol, there is a single bit of data passed
* as the alert response's low bit ("event flag").
+ * For the SMBus Host Notify protocol, the data corresponds to the
+ * 16-bit payload data reported by the slave device acting as master.
*/
- void (*alert)(struct i2c_client *, unsigned int data);
+ void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol,
+ unsigned int data);
/* an ioctl-like command that can be used to perform specific functions
* with the device.
@@ -349,6 +357,11 @@ extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
extern struct i2c_client *
i2c_new_dummy(struct i2c_adapter *adap, u16 address);
+extern struct i2c_client *
+i2c_new_secondary_device(struct i2c_client *client,
+ const char *name,
+ u16 default_addr);
+
extern void i2c_unregister_device(struct i2c_client *);
#endif /* I2C */
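Sketch of a client driver's .alert() callback under the widened signature (driver name and handling are assumptions):

	static void example_alert(struct i2c_client *client,
				  enum i2c_alert_protocol protocol, unsigned int data)
	{
		switch (protocol) {
		case I2C_PROTOCOL_SMBUS_ALERT:
			dev_info(&client->dev, "alert, event flag %u\n", data & 1);
			break;
		case I2C_PROTOCOL_SMBUS_HOST_NOTIFY:
			dev_info(&client->dev, "host notify, payload 0x%04x\n", data);
			break;
		}
	}

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example",
		},
		.alert = example_alert,
	};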
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 630f45335..57086e9fc 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -14,9 +14,12 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
-typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ unsigned int data_len);
#else
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b118744d3..a80516fd6 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -2464,7 +2465,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
- if (skb->len < 25)
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
return false;
return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
}
@@ -2487,6 +2488,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * _ieee80211_is_group_privacy_action - check if frame is a group addressed
+ * privacy action frame
+ * @hdr: the frame
+ */
+static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+ if (!ieee80211_is_action(hdr->frame_control) ||
+ !is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION ||
+ mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION;
+}
+
+/**
+ * ieee80211_is_group_privacy_action - check if frame is a group addressed
+ * privacy action frame
+ * @skb: the skb containing the frame, length will be checked
+ */
+static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
+{
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+ return _ieee80211_is_group_privacy_action((void *)skb->data);
+}
+
+/**
* ieee80211_tu_to_usec - convert time units (TU) to microseconds
* @tu: the TUs
*/
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index acedbb68a..ddb890174 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -31,6 +31,8 @@
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
#define IEEE802154_MAX_AUTH_TAG_LEN 16
+#define IEEE802154_FC_LEN 2
+#define IEEE802154_SEQ_LEN 1
/* General MAC frame format:
* 2 bytes: Frame Control
@@ -48,6 +50,7 @@
#define IEEE802154_EXTENDED_ADDR_LEN 8
#define IEEE802154_SHORT_ADDR_LEN 2
+#define IEEE802154_PAN_ID_LEN 2
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
@@ -221,9 +224,17 @@ enum {
#define IEEE802154_FCTL_ACKREQ 0x0020
#define IEEE802154_FCTL_SECEN 0x0004
#define IEEE802154_FCTL_INTRA_PAN 0x0040
+#define IEEE802154_FCTL_DADDR 0x0c00
+#define IEEE802154_FCTL_SADDR 0xc000
#define IEEE802154_FTYPE_DATA 0x0001
+#define IEEE802154_FCTL_ADDR_NONE 0x0000
+#define IEEE802154_FCTL_DADDR_SHORT 0x0800
+#define IEEE802154_FCTL_DADDR_EXTENDED 0x0c00
+#define IEEE802154_FCTL_SADDR_SHORT 0x8000
+#define IEEE802154_FCTL_SADDR_EXTENDED 0xc000
+
/*
* ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
@@ -261,6 +272,24 @@ static inline bool ieee802154_is_intra_pan(__le16 fc)
return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN);
}
+/*
+ * ieee802154_daddr_mode - get daddr mode from fc
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline __le16 ieee802154_daddr_mode(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_DADDR);
+}
+
+/*
+ * ieee802154_saddr_mode - get saddr mode from fc
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline __le16 ieee802154_saddr_mode(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_SADDR);
+}
+
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
* available lengths:
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 99403b190..228bd44ef 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -223,6 +223,7 @@ struct st_sensor_settings {
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
+ * @edge_irq: the IRQ triggers on edges and needs special handling.
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
*/
@@ -250,14 +251,13 @@ struct st_sensor_data {
const struct st_sensor_transfer_function *tf;
struct st_sensor_transfer_buffer tb;
+ bool edge_irq;
bool hw_irq_trigger;
s64 hw_timestamp;
};
#ifdef CONFIG_IIO_BUFFER
irqreturn_t st_sensors_trigger_handler(int irq, void *p);
-
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf);
#endif
#ifdef CONFIG_IIO_TRIGGER
@@ -287,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
-void st_sensors_power_enable(struct iio_dev *indio_dev);
+int st_sensors_power_enable(struct iio_dev *indio_dev);
void st_sensors_power_disable(struct iio_dev *indio_dev);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 7c29cb012..854e2dad1 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -312,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
}, \
}
-/**
- * iio_get_time_ns() - utility function to get a time stamp for events etc
- **/
-static inline s64 iio_get_time_ns(void)
-{
- return ktime_get_real_ns();
-}
+s64 iio_get_time_ns(const struct iio_dev *indio_dev);
+unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
/* Device operating modes */
#define INDIO_DIRECT_MODE 0x01
@@ -497,6 +492,7 @@ struct iio_buffer_setup_ops {
* @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
* @info: [DRIVER] callbacks and constant info from driver
+ * @clock_id: [INTERN] timestamping clock posix identifier
* @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
@@ -537,6 +533,7 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ clockid_t clock_id;
struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
@@ -565,7 +562,7 @@ extern struct bus_type iio_bus_type;
/**
* iio_device_put() - reference counted deallocation of struct device
- * @indio_dev: IIO device structure containing the device
+ * @indio_dev: IIO device structure containing the device
**/
static inline void iio_device_put(struct iio_dev *indio_dev)
{
@@ -574,6 +571,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
}
/**
+ * iio_device_get_clock() - Retrieve current timestamping clock for the device
+ * @indio_dev: IIO device structure containing the device
+ */
+static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
+{
+ return indio_dev->clock_id;
+}
+
+/**
* dev_to_iio_dev() - Get IIO device struct from a device struct
* @dev: The device embedded in the IIO device
*
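Driver-side sketch of the replacement call (the event-push helper is an assumed usage, not part of this patch): timestamps now honour the per-device clock instead of hardcoding CLOCK_REALTIME.

	#include <linux/iio/events.h>

	static void example_push_event(struct iio_dev *indio_dev, u64 ev_code)
	{
		/* Uses whichever posix clock was selected for this device;
		 * see iio_device_get_clock(). */
		iio_push_event(indio_dev, ev_code, iio_get_time_ns(indio_dev));
	}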
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
new file mode 100644
index 000000000..23ca41515
--- /dev/null
+++ b/include/linux/iio/sw_device.h
@@ -0,0 +1,70 @@
+/*
+ * Industrial I/O software device interface
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __IIO_SW_DEVICE
+#define __IIO_SW_DEVICE
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/configfs.h>
+
+#define module_iio_sw_device_driver(__iio_sw_device_type) \
+ module_driver(__iio_sw_device_type, iio_register_sw_device_type, \
+ iio_unregister_sw_device_type)
+
+struct iio_sw_device_ops;
+
+struct iio_sw_device_type {
+ const char *name;
+ struct module *owner;
+ const struct iio_sw_device_ops *ops;
+ struct list_head list;
+ struct config_group *group;
+};
+
+struct iio_sw_device {
+ struct iio_dev *device;
+ struct iio_sw_device_type *device_type;
+ struct config_group group;
+};
+
+struct iio_sw_device_ops {
+ struct iio_sw_device* (*probe)(const char *);
+ int (*remove)(struct iio_sw_device *);
+};
+
+static inline
+struct iio_sw_device *to_iio_sw_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct iio_sw_device,
+ group);
+}
+
+int iio_register_sw_device_type(struct iio_sw_device_type *dt);
+void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
+
+struct iio_sw_device *iio_sw_device_create(const char *, const char *);
+void iio_sw_device_destroy(struct iio_sw_device *);
+
+int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
+void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
+
+static inline
+void iio_swd_group_init_type_name(struct iio_sw_device *d,
+ const char *name,
+ struct config_item_type *type)
+{
+#ifdef CONFIG_CONFIGFS_FS
+ config_group_init_type_name(&d->group, name, type);
+#endif
+}
+
+#endif /* __IIO_SW_DEVICE */
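A skeleton of what the new interface expects from a software device type (probe/remove bodies are placeholders; names are assumptions):

	static struct iio_sw_device *example_probe(const char *name)
	{
		/* allocate an iio_dev, register it, wrap it in an iio_sw_device */
		return ERR_PTR(-ENOMEM);
	}

	static int example_remove(struct iio_sw_device *swd)
	{
		return 0;
	}

	static const struct iio_sw_device_ops example_ops = {
		.probe	= example_probe,
		.remove	= example_remove,
	};

	static struct iio_sw_device_type example_device_type = {
		.name	= "example",
		.owner	= THIS_MODULE,
		.ops	= &example_ops,
	};

	module_iio_sw_device_driver(example_device_type);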
diff --git a/include/linux/init.h b/include/linux/init.h
index aedb254ab..6935d0247 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -77,12 +77,6 @@
#define __refdata __section(.ref.data)
#define __refconst __constsection(.ref.rodata)
-/* compatibility defines */
-#define __init_refok __ref
-#define __initdata_refok __refdata
-#define __exit_refok __ref
-
-
#ifdef MODULE
#define __exitused
#else
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 86078ff4e..2f40a5131 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -204,7 +204,7 @@ extern struct task_group root_task_group;
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
- .time_slice = HZ, \
+ .time_slice = 1000000, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
INIT_PUSHABLE_TASKS(tsk) \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
diff --git a/include/linux/input.h b/include/linux/input.h
index 1e967694e..a65e3b24f 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -95,7 +95,7 @@ struct input_value {
* @grab: input handle that currently has the device grabbed (via
* EVIOCGRAB ioctl). When a handle grabs a device it becomes sole
* recipient for all input events coming from the device
- * @event_lock: this spinlock is is taken when input core receives
+ * @event_lock: this spinlock is taken when input core receives
* and processes a new event for the device (in input_event()).
* Code that accesses and/or modifies parameters of a device
* (such as keymap or absmin, absmax, absfuzz, etc.) after device
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index c91e13761..09d22ccb9 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -10,7 +10,26 @@
#define _TOUCHSCREEN_H
struct input_dev;
+struct input_mt_pos;
-void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
+struct touchscreen_properties {
+ unsigned int max_x;
+ unsigned int max_y;
+ bool invert_x;
+ bool invert_y;
+ bool swap_x_y;
+};
+
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
+ struct touchscreen_properties *prop);
+
+void touchscreen_set_mt_pos(struct input_mt_pos *pos,
+ const struct touchscreen_properties *prop,
+ unsigned int x, unsigned int y);
+
+void touchscreen_report_pos(struct input_dev *input,
+ const struct touchscreen_properties *prop,
+ unsigned int x, unsigned int y,
+ bool multitouch);
#endif
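Sketch of the new two-step flow in a single-touch driver (names and the BTN_TOUCH reporting are assumptions): parse once at probe time, then route every coordinate through the stored properties.

	static struct touchscreen_properties example_props;

	static void example_probe_axes(struct input_dev *input)
	{
		/* fills example_props from device properties such as
		 * touchscreen-inverted-x / touchscreen-swapped-x-y */
		touchscreen_parse_properties(input, false, &example_props);
	}

	static void example_report(struct input_dev *input,
				   unsigned int x, unsigned int y)
	{
		/* applies inversion/swapping before reporting ABS_X/ABS_Y */
		touchscreen_report_pos(input, &example_props, x, y, false);
		input_report_key(input, BTN_TOUCH, 1);
		input_sync(input);
	}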
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9fcabeb07..b6683f0ff 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
+
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
return 0;
}
+
+static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
+{
+ *nr_vecs = 1;
+ return NULL;
+}
#endif /* CONFIG_SMP */
/*
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b6..645ad06b5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
}
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
resource_size_t phys_addr;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return ioremap_wc(phys_addr, PAGE_SIZE);
+ return ioremap_wc(phys_addr, size);
}
static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
/* Non-atomic map/unmap */
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
return ((char __force __iomem *) mapping) + offset;
}
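Caller-side sketch: the length is now explicit, and passing PAGE_SIZE preserves the old single-page behaviour (the poke helper is an assumption):

	static void example_poke(struct io_mapping *mapping,
				 unsigned long offset, u32 value)
	{
		void __iomem *vaddr;

		vaddr = io_mapping_map_wc(mapping, offset, PAGE_SIZE);
		writel(value, vaddr);
		io_mapping_unmap(vaddr);
	}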
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
new file mode 100644
index 000000000..3d70ece10
--- /dev/null
+++ b/include/linux/iomap.h
@@ -0,0 +1,76 @@
+#ifndef LINUX_IOMAP_H
+#define LINUX_IOMAP_H 1
+
+#include <linux/types.h>
+
+struct fiemap_extent_info;
+struct inode;
+struct iov_iter;
+struct kiocb;
+struct vm_area_struct;
+struct vm_fault;
+
+/*
+ * Types of block ranges for iomap mappings:
+ */
+#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
+#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
+#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+
+/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
+
+/*
+ * Magic value for blkno:
+ */
+#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+
+struct iomap {
+ sector_t blkno; /* 1st sector of mapping, 512b units */
+ loff_t offset; /* file offset of mapping, bytes */
+ u64 length; /* length of mapping, bytes */
+ u16 type; /* type of mapping */
+ u16 flags; /* flags for mapping */
+ struct block_device *bdev; /* block device for I/O */
+};
+
+/*
+ * Flags for iomap_begin / iomap_end. No flag implies a read.
+ */
+#define IOMAP_WRITE (1 << 0)
+#define IOMAP_ZERO (1 << 1)
+
+struct iomap_ops {
+ /*
+ * Return the existing mapping at pos, or reserve space starting at
+ * pos for up to length, as long as we can do it as a single mapping.
+ * The actual length is returned in iomap->length.
+ */
+ int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap);
+
+ /*
+ * Commit and/or unreserve space previously allocated using iomap_begin.
+ * Written indicates the length of the successful write operation which
+ * needs to be committed, while the rest needs to be unreserved.
+ * Written might be zero if no data was written.
+ */
+ int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap);
+};
+
+ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
+ struct iomap_ops *ops);
+int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
+ bool *did_zero, struct iomap_ops *ops);
+int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ struct iomap_ops *ops);
+int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops);
+int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ loff_t start, loff_t len, struct iomap_ops *ops);
+
+#endif /* LINUX_IOMAP_H */
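A hedged skeleton of an iomap_ops implementation (everything here is illustrative; a real filesystem would look up or, for IOMAP_WRITE, allocate blocks in iomap_begin):

	static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
				       unsigned flags, struct iomap *iomap)
	{
		/* report a hole; IOMAP_WRITE callers would reserve blocks here */
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->offset = pos;
		iomap->length = length;
		iomap->type = IOMAP_HOLE;
		iomap->flags = 0;
		iomap->bdev = inode->i_sb->s_bdev;
		return 0;
	}

	static int example_iomap_end(struct inode *inode, loff_t pos, loff_t length,
				     ssize_t written, unsigned flags,
				     struct iomap *iomap)
	{
		/* nothing reserved in this sketch, so nothing to unreserve */
		return 0;
	}

	static struct iomap_ops example_iomap_ops = {
		.iomap_begin	= example_iomap_begin,
		.iomap_end	= example_iomap_end,
	};

A write path would then be driven as iomap_file_buffered_write(iocb, from, &example_iomap_ops), with the callbacks invoked once per mapping.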
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 664683aed..a35fb8b42 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -152,6 +152,7 @@ struct iommu_dm_region {
* @domain_set_attr: Change domain attributes
* @get_dm_regions: Request list of direct mapping requirements for a device
* @put_dm_regions: Free list of direct mapping requirements for a device
+ * @apply_dm_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@@ -186,6 +187,8 @@ struct iommu_ops {
/* Request/Free a list of direct mapping requirements for a device */
void (*get_dm_regions)(struct device *dev, struct list_head *list);
void (*put_dm_regions)(struct device *dev, struct list_head *list);
+ void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
+ struct iommu_dm_region *region);
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 1eee6bcfc..d10e54f03 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -63,8 +63,6 @@ struct ipc_namespace {
};
extern struct ipc_namespace init_ipc_ns;
-extern atomic_t nr_ipc_ns;
-
extern spinlock_t mq_lock;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 838dbfa3c..78c5d5ae3 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -277,7 +277,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len);
*/
enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
- SI_PCI, SI_DEVICETREE, SI_DEFAULT
+ SI_PCI, SI_DEVICETREE, SI_LAST
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5c91b0b05..c6dbcd84a 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -283,6 +283,8 @@ struct tcp6_timewait_sock {
};
#if IS_ENABLED(CONFIG_IPV6)
+bool ipv6_mod_enabled(void);
+
static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
@@ -326,6 +328,11 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
+static inline bool ipv6_mod_enabled(void)
+{
+ return false;
+}
+
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return NULL;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cbb5a2c5d..0ac26c892 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -197,6 +197,7 @@ struct irq_data {
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
* IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -212,6 +213,7 @@ enum {
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
IRQD_FORWARDED_TO_VCPU = (1 << 20),
+ IRQD_AFFINITY_MANAGED = (1 << 21),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
+ * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @flags: chip specific flags
*/
struct irq_chip {
+ struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+extern int irq_chip_pm_get(struct irq_data *data);
+extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
@@ -701,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner);
+ struct module *owner, const struct cpumask *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
- __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
+ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
irq_alloc_descs(-1, 0, 1, node)
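Sketch of an irqchip on a PM-managed parent (callbacks are stubs; names are assumptions): with parent_device set, the core can bracket request_irq()/free_irq() with irq_chip_pm_get()/irq_chip_pm_put() to runtime-resume the block.

	static void example_mask(struct irq_data *d) { }
	static void example_unmask(struct irq_data *d) { }

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_mask	= example_mask,
		.irq_unmask	= example_unmask,
	};

	static void example_chip_init(struct device *parent)
	{
		example_chip.parent_device = parent;	/* enables the PM bracketing */
	}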
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index dc493e0f0..99ac022ed 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -112,34 +112,76 @@
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
-#define GICR_PROPBASER_NonShareable (0U << 10)
-#define GICR_PROPBASER_InnerShareable (1U << 10)
-#define GICR_PROPBASER_OuterShareable (2U << 10)
-#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
-#define GICR_PROPBASER_nCnB (0U << 7)
-#define GICR_PROPBASER_nC (1U << 7)
-#define GICR_PROPBASER_RaWt (2U << 7)
-#define GICR_PROPBASER_RaWb (3U << 7)
-#define GICR_PROPBASER_WaWt (4U << 7)
-#define GICR_PROPBASER_WaWb (5U << 7)
-#define GICR_PROPBASER_RaWaWt (6U << 7)
-#define GICR_PROPBASER_RaWaWb (7U << 7)
-#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
-#define GICR_PROPBASER_IDBITS_MASK (0x1f)
-
-#define GICR_PENDBASER_NonShareable (0U << 10)
-#define GICR_PENDBASER_InnerShareable (1U << 10)
-#define GICR_PENDBASER_OuterShareable (2U << 10)
-#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
-#define GICR_PENDBASER_nCnB (0U << 7)
-#define GICR_PENDBASER_nC (1U << 7)
-#define GICR_PENDBASER_RaWt (2U << 7)
-#define GICR_PENDBASER_RaWb (3U << 7)
-#define GICR_PENDBASER_WaWt (4U << 7)
-#define GICR_PENDBASER_WaWb (5U << 7)
-#define GICR_PENDBASER_RaWaWt (6U << 7)
-#define GICR_PENDBASER_RaWaWb (7U << 7)
-#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+#define GIC_BASER_CACHE_nCnB 0ULL
+#define GIC_BASER_CACHE_SameAsInner 0ULL
+#define GIC_BASER_CACHE_nC 1ULL
+#define GIC_BASER_CACHE_RaWt 2ULL
+#define GIC_BASER_CACHE_RaWb 3ULL
+#define GIC_BASER_CACHE_WaWt 4ULL
+#define GIC_BASER_CACHE_WaWb 5ULL
+#define GIC_BASER_CACHE_RaWaWt 6ULL
+#define GIC_BASER_CACHE_RaWaWb 7ULL
+#define GIC_BASER_CACHE_MASK 7ULL
+#define GIC_BASER_NonShareable 0ULL
+#define GIC_BASER_InnerShareable 1ULL
+#define GIC_BASER_OuterShareable 2ULL
+#define GIC_BASER_SHAREABILITY_MASK 3ULL
+
+#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \
+ (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT)
+
+#define GIC_BASER_SHAREABILITY(reg, type) \
+ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+
+#define GICR_PROPBASER_SHAREABILITY_SHIFT (10)
+#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_PROPBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK)
+#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK)
+#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK)
+#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_PROPBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable)
+
+#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
+#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
+#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
+#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
+#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
+#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
+#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb)
+
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
+#define GICR_PENDBASER_SHAREABILITY_SHIFT (10)
+#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_PENDBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK)
+#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK)
+#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK)
+#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_PENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable)
+
+#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
+#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
+#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
+#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
+#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
+#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
+#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb)
+
+#define GICR_PENDBASER_PTZ BIT_ULL(62)
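
For reference, a sketch of how the token-pasting helpers above expand (illustration only, not part of the patch):

/*
 * GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) pastes to:
 *   (GIC_BASER_CACHE_RaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT)
 *   == (3ULL << 7)
 * GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) pastes to:
 *   (GIC_BASER_InnerShareable << GICR_PROPBASER_SHAREABILITY_SHIFT)
 *   == (1ULL << 10)
 * so a single set of cacheability/shareability names serves every
 * *BASER register, each with its own _SHIFT definitions.
 */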
/*
* Re-Distributor registers, offsets from SGI_base
@@ -175,59 +217,91 @@
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
#define GITS_BASER 0x0100
+#define GITS_IDREGS_BASE 0xffd0
+#define GITS_PIDR0 0xffe0
+#define GITS_PIDR1 0xffe4
#define GITS_PIDR2 GICR_PIDR2
+#define GITS_PIDR4 0xffd0
+#define GITS_CIDR0 0xfff0
+#define GITS_CIDR1 0xfff4
+#define GITS_CIDR2 0xfff8
+#define GITS_CIDR3 0xfffc
#define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_QUIESCENT (1U << 31)
+#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
-
-#define GITS_CBASER_VALID (1UL << 63)
-#define GITS_CBASER_nCnB (0UL << 59)
-#define GITS_CBASER_nC (1UL << 59)
-#define GITS_CBASER_RaWt (2UL << 59)
-#define GITS_CBASER_RaWb (3UL << 59)
-#define GITS_CBASER_WaWt (4UL << 59)
-#define GITS_CBASER_WaWb (5UL << 59)
-#define GITS_CBASER_RaWaWt (6UL << 59)
-#define GITS_CBASER_RaWaWb (7UL << 59)
-#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
-#define GITS_CBASER_NonShareable (0UL << 10)
-#define GITS_CBASER_InnerShareable (1UL << 10)
-#define GITS_CBASER_OuterShareable (2UL << 10)
-#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_SHAREABILITY_SHIFT (10)
+#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
+#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_CBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK)
+#define GITS_CBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK)
+#define GITS_CBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK)
+#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK
+
+#define GITS_CBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable)
+
+#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
+#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
+#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
+#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
+#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
+#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
+#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
#define GITS_BASER_NR_REGS 8
-#define GITS_BASER_VALID (1UL << 63)
-#define GITS_BASER_nCnB (0UL << 59)
-#define GITS_BASER_nC (1UL << 59)
-#define GITS_BASER_RaWt (2UL << 59)
-#define GITS_BASER_RaWb (3UL << 59)
-#define GITS_BASER_WaWt (4UL << 59)
-#define GITS_BASER_WaWb (5UL << 59)
-#define GITS_BASER_RaWaWt (6UL << 59)
-#define GITS_BASER_RaWaWb (7UL << 59)
-#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
-#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_INDIRECT (1ULL << 62)
+
+#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
+#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_BASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK)
+#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK
+#define GITS_BASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK)
+#define GITS_BASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK)
+
+#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
+#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
+#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
+#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
+#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
+#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
+#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb)
+
+#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
-#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
-#define GITS_BASER_NonShareable (0UL << 10)
-#define GITS_BASER_InnerShareable (1UL << 10)
-#define GITS_BASER_OuterShareable (2UL << 10)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
-#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGES_MAX 256
+#define GITS_BASER_PAGES_SHIFT (0)
+#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
#define GITS_BASER_TYPE_NONE 0
#define GITS_BASER_TYPE_DEVICE 1
@@ -238,12 +312,17 @@
#define GITS_BASER_TYPE_RESERVED6 6
#define GITS_BASER_TYPE_RESERVED7 7
+#define GITS_LVL1_ENTRY_SIZE (8UL)
+
/*
* ITS commands
*/
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
-#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MAPTI 0x0a
+/* older GIC documentation used MAPVI for this command */
+#define GITS_CMD_MAPVI GITS_CMD_MAPTI
+#define GITS_CMD_MAPI 0x0b
#define GITS_CMD_MOVI 0x01
#define GITS_CMD_DISCARD 0x0f
#define GITS_CMD_INV 0x0c
@@ -254,6 +333,23 @@
#define GITS_CMD_SYNC 0x05
/*
+ * ITS error numbers
+ */
+#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
+#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
+#define E_ITS_MAPD_DEVICE_OOR 0x010801
+#define E_ITS_MAPC_PROCNUM_OOR 0x010902
+#define E_ITS_MAPC_COLLECTION_OOR 0x010903
+#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
+#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
+#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
+#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
+#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01
+#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07
+
+/*
* CPU interface registers
*/
#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
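
As an illustration of the GITS_BASER field helpers above, a driver probing the ITS might decode one register value like this (sketch; gits_read_baser() and the variable names are made up for the example):

/* Sketch: decode a GITS_BASER<n> value with the helpers defined above. */
u64 val = gits_read_baser(its, i);             /* hypothetical accessor */
unsigned int type = GITS_BASER_TYPE(val);      /* e.g. GITS_BASER_TYPE_DEVICE */
unsigned int esz = GITS_BASER_ENTRY_SIZE(val); /* entry size in bytes */
unsigned int npg = GITS_BASER_NR_PAGES(val);   /* pages backing the table */
bool indirect = !!(val & GITS_BASER_INDIRECT); /* two-level table? */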
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index fd0518555..eafc965b3 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -101,9 +101,14 @@
#include <linux/irqdomain.h>
struct device_node;
+struct gic_chip_data;
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
int gic_cpu_if_down(unsigned int gic_nr);
+void gic_cpu_save(struct gic_chip_data *gic);
+void gic_cpu_restore(struct gic_chip_data *gic);
+void gic_dist_save(struct gic_chip_data *gic);
+void gic_dist_restore(struct gic_chip_data *gic);
/*
* Subdrivers that need some preparatory work can initialize their
@@ -112,6 +117,12 @@ int gic_cpu_if_down(unsigned int gic_nr);
int gic_of_init(struct device_node *node, struct device_node *parent);
/*
+ * Initialises and registers a non-root or child GIC chip. Memory for
+ * the gic_chip_data structure is dynamically allocated.
+ */
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
+
+/*
* Legacy platforms not converted to DT yet must use this to init
* their GIC
*/
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index f1f36e04d..ffb84604c 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -39,6 +39,7 @@ struct irq_domain;
struct of_device_id;
struct irq_chip;
struct irq_data;
+struct cpumask;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node);
+ irq_hw_number_t hwirq, int node,
+ const struct cpumask *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc);
+ bool realloc, const struct cpumask *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern void irq_domain_activate_irq(struct irq_data *irq_data);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
@@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
+ NULL);
}
extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
@@ -452,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
return -1;
}
+static inline void irq_domain_free_irqs(unsigned int virq,
+ unsigned int nr_irqs) { }
+
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return false;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index efb232c5f..dfaa1f4dc 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -491,10 +491,6 @@ struct jbd2_journal_handle
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map h_lockdep_map;
-#endif
};
@@ -793,6 +789,7 @@ jbd2_time_diff(unsigned long start, unsigned long end)
* @j_proc_entry: procfs entry for the jbd statistics directory
* @j_stats: Overall statistics
* @j_private: An opaque pointer to fs-private information.
+ * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
*/
struct journal_s
@@ -1035,8 +1032,26 @@ struct journal_s
/* Precomputed journal UUID checksum for seeding other checksums */
__u32 j_csum_seed;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Lockdep entity to track transaction commit dependencies. Handles
+ * hold this "lock" for read; when we wait for commit, we acquire the
+ * "lock" for writing. This matches the properties of jbd2 journalling:
+ * the running transaction has to wait for all handles to be dropped
+ * before that transaction can commit, and acquiring a handle may in
+ * turn require a transaction commit to finish.
+ */
+ struct lockdep_map j_trans_commit_map;
+#endif
};
+#define jbd2_might_wait_for_commit(j) \
+ do { \
+ rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \
+ rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \
+ } while (0)
+
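A sketch of the intended pairing (an assumption based on the struct journal_s comment above; the handle-side calls live in fs/jbd2, not in this header): starting a handle takes the map for read and stopping releases it, so lockdep can warn when a task that still holds a handle reaches a jbd2_might_wait_for_commit() site:

/* Sketch only, under CONFIG_DEBUG_LOCK_ALLOC: */
static inline void example_handle_start(journal_t *journal)
{
	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
}

static inline void example_handle_stop(journal_t *journal)
{
	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
}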
/* journal feature predicate functions */
#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 68904469f..661af564f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -76,7 +76,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/bug.h>
extern bool static_key_initialized;
@@ -115,20 +114,8 @@ enum jump_label_type {
struct module;
-#include <linux/atomic.h>
-
#ifdef HAVE_JUMP_LABEL
-static inline int static_key_count(struct static_key *key)
-{
- /*
- * -1 means the first static_key_slow_inc() is in progress.
- * static_key_enabled() must return true, so return 1 here.
- */
- int n = atomic_read(&key->enabled);
- return n >= 0 ? n : 1;
-}
-
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
@@ -157,16 +144,29 @@ extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern int static_key_count(struct static_key *key);
+extern void static_key_enable(struct static_key *key);
+extern void static_key_disable(struct static_key *key);
+/*
+ * We should be using ATOMIC_INIT() for initializing .enabled, but
+ * the inclusion of atomic.h is problematic for inclusion of jump_label.h
+ * in 'low-level' headers. Thus, we are initializing .enabled with a
+ * raw value, but have added a BUILD_BUG_ON() in jump_label_init()
+ * (see kernel/jump_label.c) to catch any mismatch.
+ */
#define STATIC_KEY_INIT_TRUE \
- { .enabled = ATOMIC_INIT(1), \
+ { .enabled = { 1 }, \
.entries = (void *)JUMP_TYPE_TRUE }
#define STATIC_KEY_INIT_FALSE \
- { .enabled = ATOMIC_INIT(0), \
+ { .enabled = { 0 }, \
.entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
static inline int static_key_count(struct static_key *key)
{
return atomic_read(&key->enabled);
@@ -216,14 +216,6 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
-#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-
-#endif /* HAVE_JUMP_LABEL */
-
-#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#define jump_label_enabled static_key_enabled
-
static inline void static_key_enable(struct static_key *key)
{
int count = static_key_count(key);
@@ -244,6 +236,14 @@ static inline void static_key_disable(struct static_key *key)
static_key_slow_dec(key);
}
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
+
+#endif /* HAVE_JUMP_LABEL */
+
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
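The check the comment above refers to presumably looks like this (sketch; ATOMIC_INIT(i) expands to a braced initializer, so the cast forms a compound literal whose value must match the raw { 0 } / { 1 } used here):

/* Sketch of the jump_label_init() sanity check in kernel/jump_label.c: */
BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);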
/* -------------------------------------------------------------------------- */
/*
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ac4b3c46a..d60030330 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -56,6 +56,7 @@ void kasan_cache_destroy(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
@@ -77,6 +78,7 @@ void kasan_free_shadow(const struct vm_struct *vm);
size_t ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);
#else /* CONFIG_KASAN */
@@ -101,6 +103,8 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
+static inline void kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
@@ -121,6 +125,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
#endif /* CONFIG_KASAN */
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index b33c7797e..15ec117ec 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -3,6 +3,21 @@
#include <generated/autoconf.h>
+#define __ARG_PLACEHOLDER_1 0,
+#define __take_second_arg(__ignored, val, ...) val
+
+/*
+ * The use of "&&" / "||" is limited in certain expressions.
+ * The macros below compute "and" / "or" with macro expansion only.
+ */
+#define __and(x, y) ___and(x, y)
+#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
+#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
+
+#define __or(x, y) ___or(x, y)
+#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
+#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
+
/*
* Helper macros to use CONFIG_ options in C/CPP expressions. Note that
* these only work with boolean and tristate options.
@@ -16,11 +31,10 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define __ARG_PLACEHOLDER_1 0,
-#define config_enabled(cfg) _config_enabled(cfg)
-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
-#define ___config_enabled(__ignored, val, ...) val
+#define config_enabled(cfg) ___is_defined(cfg)
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
@@ -41,14 +55,13 @@
* This is similar to IS_ENABLED(), but returns false when invoked from
* built-in code when CONFIG_FOO is set to 'm'.
*/
-#define IS_REACHABLE(option) (config_enabled(option) || \
- (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
+ __and(IS_MODULE(option), __is_defined(MODULE)))
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
* 0 otherwise.
*/
-#define IS_ENABLED(option) \
- (IS_BUILTIN(option) || IS_MODULE(option))
+#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
#endif /* __LINUX_KCONFIG_H */
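
To see why the __or() form works where a plain "||" may not, here is an illustrative expansion with CONFIG_FOO built in (i.e. defined as 1):

/*
 * IS_ENABLED(CONFIG_FOO)
 *   -> __or(1, 0)                      after IS_BUILTIN()/IS_MODULE() expand
 *   -> ____or(__ARG_PLACEHOLDER_1, 0)
 *   -> __take_second_arg(0, 1, 0)      __ARG_PLACEHOLDER_1 expands to "0,"
 *   -> 1
 *
 * When CONFIG_FOO is undefined, no placeholder matches, so the first
 * argument of __take_second_arg() is the token sequence
 * "__ARG_PLACEHOLDER_CONFIG_FOO 1" and the trailing 0 is selected instead.
 */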
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index a19bcf9e7..410decacf 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void);
static inline
int kdb_process_cpu(const struct task_struct *p)
{
- unsigned int cpu = task_thread_info(p)->cpu;
+ unsigned int cpu = task_cpu(p);
if (cpu > num_possible_cpus())
cpu = 0;
return cpu;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94aa10ffe..d96a6118d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -11,7 +11,6 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
-#include <linux/dynamic_debug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -451,6 +450,7 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
+extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 25a822f6f..44fda64ad 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -92,7 +92,6 @@ static inline void account_process_tick(struct task_struct *tsk, int user)
extern void account_process_tick(struct task_struct *, int user);
#endif
-extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e8acb2b43..d7437777b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,6 +14,8 @@
#if !defined(__ASSEMBLY__)
+#include <asm/io.h>
+
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC_CORE
@@ -41,7 +43,7 @@
#endif
#ifndef KEXEC_CONTROL_MEMORY_GFP
-#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY)
#endif
#ifndef KEXEC_CONTROL_PAGE_SIZE
@@ -228,12 +230,13 @@ extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
+int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
void crash_save_vmcoreinfo(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
void vmcoreinfo_append_str(const char *fmt, ...);
-unsigned long paddr_vmcoreinfo_note(void);
+phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
@@ -318,12 +321,51 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
+#ifndef page_to_boot_pfn
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+ return page_to_pfn(page);
+}
+#endif
+
+#ifndef boot_pfn_to_page
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+ return pfn_to_page(boot_pfn);
+}
+#endif
+
+#ifndef phys_to_boot_phys
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+ return phys;
+}
+#endif
+
+#ifndef boot_phys_to_phys
+static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
+{
+ return boot_phys;
+}
+#endif
+
+static inline unsigned long virt_to_boot_phys(void *addr)
+{
+ return phys_to_boot_phys(__pa((unsigned long)addr));
+}
+
+static inline void *boot_phys_to_virt(unsigned long entry)
+{
+ return phys_to_virt(boot_phys_to_phys(entry));
+}
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
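
The boot-view translation helpers above are identity functions by default; an architecture whose firmware ("boot") view of RAM differs from the running kernel's can pre-define them before this header is included. A hypothetical override (ARCH_BOOT_RAM_OFFSET is invented for the sketch):

/* Sketch: in an arch header included ahead of <linux/kexec.h>. */
#define phys_to_boot_phys phys_to_boot_phys
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
	return phys + ARCH_BOOT_RAM_OFFSET;	/* hypothetical constant */
}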
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index eeb307985..1e032a1dd 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,6 +4,11 @@
#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern struct attribute_group khugepaged_attr_group;
+
+extern int khugepaged_init(void);
+extern void khugepaged_destroy(void);
+extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7ae216a39..5329b236d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -19,21 +19,6 @@ struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
-int __ksm_enter(struct mm_struct *mm);
-void __ksm_exit(struct mm_struct *mm);
-
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
- return 0;
-}
-
-static inline void ksm_exit(struct mm_struct *mm)
-{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
- __ksm_exit(mm);
-}
static inline struct stable_node *page_stable_node(struct page *page)
{
@@ -43,8 +28,7 @@ static inline struct stable_node *page_stable_node(struct page *page)
static inline void set_page_stable_node(struct page *page,
struct stable_node *stable_node)
{
- page->mapping = (void *)stable_node +
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
/*
@@ -64,6 +48,33 @@ struct page *ksm_might_need_to_copy(struct page *page,
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+#ifdef CONFIG_KSM_LEGACY
+int __ksm_enter(struct mm_struct *mm);
+void __ksm_exit(struct mm_struct *mm);
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ return __ksm_enter(mm);
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ __ksm_exit(mm);
+}
+
+#elif defined(CONFIG_UKSM)
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_KSM_LEGACY || CONFIG_UKSM */
+
#else /* !CONFIG_KSM */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -106,4 +117,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
+#include <linux/uksm.h>
+
#endif /* __LINUX_KSM_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c9c973a7..9c28b4d4c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -164,6 +164,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
+struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ gpa_t addr);
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
@@ -315,7 +317,13 @@ struct kvm_kernel_irq_routing_entry {
unsigned irqchip;
unsigned pin;
} irqchip;
- struct msi_msg msi;
+ struct {
+ u32 address_lo;
+ u32 address_hi;
+ u32 data;
+ u32 flags;
+ u32 devid;
+ } msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
};
@@ -371,7 +379,15 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+
+ /*
+ * created_vcpus is protected by kvm->lock, and is incremented
+ * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
+ * incremented after storing the kvm_vcpu pointer in vcpus,
+ * and is accessed atomically.
+ */
atomic_t online_vcpus;
+ int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
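
A sketch of the ordering the created_vcpus comment describes, loosely modelled on vcpu creation (error handling omitted; create_and_setup_vcpu() is a made-up placeholder):

mutex_lock(&kvm->lock);
kvm->created_vcpus++;			/* reserve the slot under kvm->lock */
mutex_unlock(&kvm->lock);

vcpu = create_and_setup_vcpu(kvm);	/* may sleep, lock not held */

mutex_lock(&kvm->lock);
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
smp_wmb();				/* publish the pointer ... */
atomic_inc(&kvm->online_vcpus);		/* ... before making it visible */
mutex_unlock(&kvm->lock);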
@@ -867,45 +883,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-/* must be called with irqs disabled */
-static inline void __kvm_guest_enter(void)
-{
- guest_enter();
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_cpu_is_enabled())
- rcu_virt_note_context_switch(smp_processor_id());
-}
-
-/* must be called with irqs disabled */
-static inline void __kvm_guest_exit(void)
-{
- guest_exit();
-}
-
-static inline void kvm_guest_enter(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __kvm_guest_enter();
- local_irq_restore(flags);
-}
-
-static inline void kvm_guest_exit(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __kvm_guest_exit();
- local_irq_restore(flags);
-}
-
/*
* search_memslots() and __gfn_to_memslot() are here because they are
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
@@ -1032,17 +1009,18 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
+#elif defined(CONFIG_ARM64)
+#define KVM_MAX_IRQ_ROUTES 4096
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif
-int kvm_setup_default_irq_routing(struct kvm *kvm);
-int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);
@@ -1097,12 +1075,6 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-#else
-static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-#endif
-
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
@@ -1141,9 +1113,21 @@ struct kvm_device {
/* create, destroy, and name are mandatory */
struct kvm_device_ops {
const char *name;
+
+ /*
+ * create is called with kvm->lock held; any operation not suitable
+ * to perform while holding the lock should be deferred to init (see
+ * below).
+ */
int (*create)(struct kvm_device *dev, u32 type);
/*
+ * init is called after a successful create, and runs without
+ * kvm->lock held.
+ */
+ void (*init)(struct kvm_device *dev);
+
+ /*
* Destroy is responsible for freeing dev.
*
* Destroy may be called before or after destructors are called
diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h
new file mode 100644
index 000000000..49b37ed8d
--- /dev/null
+++ b/include/linux/leds-lp3952.h
@@ -0,0 +1,125 @@
+/*
+ * LED driver for TI lp3952 controller
+ *
+ * Copyright (C) 2016, DAQRI, LLC.
+ * Author: Tony Makkiel <tony.makkiel@daqri.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef LEDS_LP3952_H_
+#define LEDS_LP3952_H_
+
+#define LP3952_NAME "lp3952"
+#define LP3952_CMD_REG_COUNT 8
+#define LP3952_BRIGHT_MAX 4
+#define LP3952_LABEL_MAX_LEN 15
+
+#define LP3952_REG_LED_CTRL 0x00
+#define LP3952_REG_R1_BLNK_TIME_CTRL 0x01
+#define LP3952_REG_R1_BLNK_CYCLE_CTRL 0x02
+#define LP3952_REG_G1_BLNK_TIME_CTRL 0x03
+#define LP3952_REG_G1_BLNK_CYCLE_CTRL 0x04
+#define LP3952_REG_B1_BLNK_TIME_CTRL 0x05
+#define LP3952_REG_B1_BLNK_CYCLE_CTRL 0x06
+#define LP3952_REG_ENABLES 0x0B
+#define LP3952_REG_PAT_GEN_CTRL 0x11
+#define LP3952_REG_RGB1_MAX_I_CTRL 0x12
+#define LP3952_REG_RGB2_MAX_I_CTRL 0x13
+#define LP3952_REG_CMD_0 0x50
+#define LP3952_REG_RESET 0x60
+#define REG_MAX LP3952_REG_RESET
+
+#define LP3952_PATRN_LOOP BIT(1)
+#define LP3952_PATRN_GEN_EN BIT(2)
+#define LP3952_INT_B00ST_LDR BIT(2)
+#define LP3952_ACTIVE_MODE BIT(6)
+#define LP3952_LED_MASK_ALL 0x3f
+
+/* Transition Time in ms */
+enum lp3952_tt {
+ TT0,
+ TT55,
+ TT110,
+ TT221,
+ TT422,
+ TT885,
+ TT1770,
+ TT3539
+};
+
+/* Command Execution Time in ms */
+enum lp3952_cet {
+ CET197,
+ CET393,
+ CET590,
+ CET786,
+ CET1180,
+ CET1376,
+ CET1573,
+ CET1769,
+ CET1966,
+ CET2163,
+ CET2359,
+ CET2556,
+ CET2763,
+ CET2949,
+ CET3146
+};
+
+/* Max Current in % */
+enum lp3952_colour_I_log_0 {
+ I0,
+ I7,
+ I14,
+ I21,
+ I32,
+ I46,
+ I71,
+ I100
+};
+
+enum lp3952_leds {
+ LP3952_BLUE_2,
+ LP3952_GREEN_2,
+ LP3952_RED_2,
+ LP3952_BLUE_1,
+ LP3952_GREEN_1,
+ LP3952_RED_1,
+ LP3952_LED_ALL
+};
+
+struct lp3952_ctrl_hdl {
+ struct led_classdev cdev;
+ char name[LP3952_LABEL_MAX_LEN];
+ enum lp3952_leds channel;
+ void *priv;
+};
+
+struct ptrn_gen_cmd {
+ union {
+ struct {
+ u16 tt:3;
+ u16 b:3;
+ u16 cet:4;
+ u16 g:3;
+ u16 r:3;
+ };
+ struct {
+ u8 lsb;
+ u8 msb;
+ } bytes;
+ };
+} __packed;
+
+struct lp3952_led_array {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ struct gpio_desc *enable_gpio;
+ struct lp3952_ctrl_hdl leds[LP3952_LED_ALL];
+};
+
+#endif /* LEDS_LP3952_H_ */
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index b8d6fffed..d215b4561 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -16,6 +16,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
+#include <dt-bindings/leds/leds-pca9532.h>
enum pca9532_state {
PCA9532_OFF = 0x0,
@@ -24,16 +25,14 @@ enum pca9532_state {
PCA9532_PWM1 = 0x3
};
-enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED,
- PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO };
-
struct pca9532_led {
u8 id;
struct i2c_client *client;
- char *name;
+ const char *name;
+ const char *default_trigger;
struct led_classdev ldev;
struct work_struct work;
- enum pca9532_type type;
+ u32 type;
enum pca9532_state state;
};
diff --git a/include/linux/leds.h b/include/linux/leds.h
index e5e7f2e80..8a3b5d296 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -325,10 +325,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
#endif /* CONFIG_LEDS_TRIGGERS */
/* Trigger specific functions */
-#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK
-extern void ledtrig_ide_activity(void);
+#ifdef CONFIG_LEDS_TRIGGER_DISK
+extern void ledtrig_disk_activity(void);
#else
-static inline void ledtrig_ide_activity(void) {}
+static inline void ledtrig_disk_activity(void) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
@@ -387,8 +387,16 @@ struct gpio_led_platform_data {
unsigned long *delay_off);
};
+#ifdef CONFIG_NEW_LEDS
struct platform_device *gpio_led_register_device(
int id, const struct gpio_led_platform_data *pdata);
+#else
+static inline struct platform_device *gpio_led_register_device(
+ int id, const struct gpio_led_platform_data *pdata)
+{
+ return NULL;
+}
+#endif
enum cpu_led_event {
CPU_LED_IDLE_START, /* CPU enters idle */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d15c19e33..e37d4f99f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -146,13 +146,6 @@ enum {
ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
- /* protocol flags */
- ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
- ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
- ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
- ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
- ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
-
/* struct ata_device stuff */
ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
@@ -1039,58 +1032,29 @@ extern const unsigned long sata_deb_timing_long[];
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
-/*
- * protocol tests
- */
-static inline unsigned int ata_prot_flags(u8 prot)
-{
- switch (prot) {
- case ATA_PROT_NODATA:
- return 0;
- case ATA_PROT_PIO:
- return ATA_PROT_FLAG_PIO;
- case ATA_PROT_DMA:
- return ATA_PROT_FLAG_DMA;
- case ATA_PROT_NCQ:
- return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
- case ATAPI_PROT_NODATA:
- return ATA_PROT_FLAG_ATAPI;
- case ATAPI_PROT_PIO:
- return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
- case ATAPI_PROT_DMA:
- return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
- }
- return 0;
-}
-
-static inline int ata_is_atapi(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
-}
-
-static inline int ata_is_nodata(u8 prot)
+static inline bool ata_is_atapi(u8 prot)
{
- return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
+ return prot & ATA_PROT_FLAG_ATAPI;
}
-static inline int ata_is_pio(u8 prot)
+static inline bool ata_is_pio(u8 prot)
{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
+ return prot & ATA_PROT_FLAG_PIO;
}
-static inline int ata_is_dma(u8 prot)
+static inline bool ata_is_dma(u8 prot)
{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
+ return prot & ATA_PROT_FLAG_DMA;
}
-static inline int ata_is_ncq(u8 prot)
+static inline bool ata_is_ncq(u8 prot)
{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
+ return prot & ATA_PROT_FLAG_NCQ;
}
-static inline int ata_is_data(u8 prot)
+static inline bool ata_is_data(u8 prot)
{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
+ return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA);
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
@@ -1407,7 +1371,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
return ap->nr_pmp_links != 0;
}
-static inline int ata_is_host_link(const struct ata_link *link)
+static inline bool ata_is_host_link(const struct ata_link *link)
{
return link == &link->ap->link || link == link->ap->slave_link;
}
@@ -1422,7 +1386,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
return false;
}
-static inline int ata_is_host_link(const struct ata_link *link)
+static inline bool ata_is_host_link(const struct ata_link *link)
{
return 1;
}
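
These helpers now test flag bits embedded in the protocol value itself, which only works if each ATA_PROT_*/ATAPI_PROT_* constant is built from the flag bits. A sketch of the expected encoding (presumably in <linux/ata.h>; the exact values are an assumption here):

/* Sketch: protocol values carrying their own classification bits. */
enum {
	ATA_PROT_FLAG_PIO	= (1 << 0),
	ATA_PROT_FLAG_DMA	= (1 << 1),
	ATA_PROT_FLAG_NCQ	= (1 << 2),
	ATA_PROT_FLAG_ATAPI	= (1 << 3),

	ATA_PROT_NODATA		= 0,
	ATA_PROT_PIO		= ATA_PROT_FLAG_PIO,
	ATA_PROT_DMA		= ATA_PROT_FLAG_DMA,
	ATA_PROT_NCQ		= ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ,
	ATAPI_PROT_NODATA	= ATA_PROT_FLAG_ATAPI,
	ATAPI_PROT_PIO		= ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO,
	ATAPI_PROT_DMA		= ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA,
};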
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 0c3c30cbb..b519e137b 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -52,6 +52,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nd_namespace_label;
struct nvdimm_drvdata;
+
struct nd_mapping {
struct nvdimm *nvdimm;
struct nd_namespace_label **labels;
@@ -69,6 +70,7 @@ struct nd_mapping {
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
+ struct module *module;
char *provider_name;
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
@@ -99,13 +101,21 @@ struct nd_region_desc {
unsigned long flags;
};
+struct device;
+void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
+ resource_size_t offset, size_t size)
+{
+ return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
+}
+
struct nvdimm_bus;
struct module;
struct device;
struct nd_blk_region;
struct nd_blk_region_desc {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
struct nd_region_desc ndr_desc;
@@ -119,22 +129,22 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
- struct nvdimm_bus_descriptor *nfit_desc, struct module *module);
-#define nvdimm_bus_register(parent, desc) \
- __nvdimm_bus_register(parent, desc, THIS_MODULE)
+struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
+ struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
+struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
- unsigned long cmd_mask);
+ unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
@@ -156,4 +166,6 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
+void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_has_flush(struct nd_region *nd_region);
#endif /* __LIBNVDIMM_H__ */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ef2c7d2e7..ba78b8306 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -1,7 +1,9 @@
#ifndef NVM_H
#define NVM_H
+#include <linux/blkdev.h>
#include <linux/types.h>
+#include <uapi/linux/lightnvm.h>
enum {
NVM_IO_OK = 0,
@@ -269,24 +271,15 @@ struct nvm_lun {
int lun_id;
int chnl_id;
- /* It is up to the target to mark blocks as closed. If the target does
- * not do it, all blocks are marked as open, and nr_open_blocks
- * represents the number of blocks in use
- */
- unsigned int nr_open_blocks; /* Number of used, writable blocks */
- unsigned int nr_closed_blocks; /* Number of used, read-only blocks */
- unsigned int nr_free_blocks; /* Number of unused blocks */
- unsigned int nr_bad_blocks; /* Number of bad blocks */
-
spinlock_t lock;
+ unsigned int nr_free_blocks; /* Number of unused blocks */
struct nvm_block *blocks;
};
enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */
- NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */
+ NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
@@ -385,6 +378,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
@@ -455,6 +449,8 @@ struct nvm_tgt_type {
struct list_head list;
};
+extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);
+
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
@@ -463,6 +459,9 @@ extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
+
+typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
+typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
@@ -488,9 +487,10 @@ struct nvmm_type {
nvmm_register_fn *register_mgr;
nvmm_unregister_fn *unregister_mgr;
+ nvmm_create_tgt_fn *create_tgt;
+ nvmm_remove_tgt_fn *remove_tgt;
+
/* Block administration callbacks */
- nvmm_get_blk_fn *get_blk_unlocked;
- nvmm_put_blk_fn *put_blk_unlocked;
nvmm_get_blk_fn *get_blk;
nvmm_put_blk_fn *put_blk;
nvmm_open_blk_fn *open_blk;
@@ -520,10 +520,6 @@ struct nvmm_type {
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);
-extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
- struct nvm_lun *, unsigned long);
-extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);
-
extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
@@ -532,11 +528,13 @@ extern int nvm_register(struct request_queue *, char *,
struct nvm_dev_ops *);
extern void nvm_unregister(char *);
+void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
+
extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
- struct ppa_addr *, int, int);
+ const struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
diff --git a/include/linux/list.h b/include/linux/list.h
index 5356f4d66..5183138aa 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h)
}
/*
+ * Check whether the node is the only node of the head without
+ * accessing head:
+ */
+static inline bool
+hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
+{
+ return !n->next && n->pprev == &h->first;
+}
+
+/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
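
A small usage sketch for the new helper (hypothetical caller): it answers "was this the last node?" from the node's own links, without re-reading the head after deletion:

/* Sketch: detach a node and report whether the list became empty. */
static inline bool example_detach(struct hlist_node *n, struct hlist_head *h)
{
	bool was_last = hlist_is_singular_node(n, h);

	__hlist_del(n);
	n->next = NULL;
	n->pprev = NULL;
	return was_last;
}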
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7ae397669..101bf19c0 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1356,7 +1356,7 @@ union security_list_options {
struct super_block *newsb);
int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
int (*dentry_init_security)(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen);
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
new file mode 100644
index 000000000..6b55c938b
--- /dev/null
+++ b/include/linux/mailbox/brcm-message.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common header for Broadcom mailbox messages which is shared across
+ * Broadcom SoCs and Broadcom mailbox client drivers.
+ */
+
+#ifndef _LINUX_BRCM_MESSAGE_H_
+#define _LINUX_BRCM_MESSAGE_H_
+
+#include <linux/scatterlist.h>
+
+enum brcm_message_type {
+ BRCM_MESSAGE_UNKNOWN = 0,
+ BRCM_MESSAGE_SPU,
+ BRCM_MESSAGE_SBA,
+ BRCM_MESSAGE_MAX,
+};
+
+struct brcm_sba_command {
+ u64 cmd;
+#define BRCM_SBA_CMD_TYPE_A BIT(0)
+#define BRCM_SBA_CMD_TYPE_B BIT(1)
+#define BRCM_SBA_CMD_TYPE_C BIT(2)
+#define BRCM_SBA_CMD_HAS_RESP BIT(3)
+#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
+ u64 flags;
+ dma_addr_t input;
+ size_t input_len;
+ dma_addr_t resp;
+ size_t resp_len;
+ dma_addr_t output;
+ size_t output_len;
+};
+
+struct brcm_message {
+ enum brcm_message_type type;
+ union {
+ struct {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ } spu;
+ struct {
+ struct brcm_sba_command *cmds;
+ unsigned int cmds_count;
+ } sba;
+ };
+ void *ctx;
+ int error;
+};
+
+#endif /* _LINUX_BRCM_MESSAGE_H_ */
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index ea34a867c..d61023276 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -66,7 +66,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
}
#endif
-int mvebu_mbus_save_cpu_target(u32 *store_addr);
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 433e0c74d..a585b4b5f 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -14,6 +14,8 @@
#include <asm/io.h>
#include <linux/rtc.h> /* get the user-level API */
#include <asm/mc146818rtc.h> /* register access macros */
+#include <linux/bcd.h>
+#include <linux/delay.h>
#ifdef __KERNEL__
#include <linux/spinlock.h> /* spinlock_t */
@@ -120,4 +122,7 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
+unsigned int mc146818_get_time(struct rtc_time *time);
+int mc146818_set_time(struct rtc_time *time);
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index a243dbba8..61f5b21b3 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -10,11 +10,13 @@
#ifndef __LINUX_MDIO_MUX_H
#define __LINUX_MDIO_MUX_H
#include <linux/device.h>
+#include <linux/phy.h>
int mdio_mux_init(struct device *dev,
int (*switch_fn) (int cur, int desired, void *data),
void **mux_handle,
- void *data);
+ void *data,
+ struct mii_bus *mux_bus);
void mdio_mux_uninit(void *mux_handle);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 3106ac1c8..2925da235 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -73,8 +73,8 @@ extern bool movable_node_enabled;
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
+ phys_addr_t start, phys_addr_t end,
+ int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -110,7 +110,7 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
phys_addr_t *out_end, int *out_nid);
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
- phys_addr_t *out_end);
+ phys_addr_t *out_end);
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
@@ -148,7 +148,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
__next_mem_range_rev(&i, nid, flags, type_a, type_b,\
- p_start, p_end, p_nid); \
+ p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
@@ -163,8 +163,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
* is initialized.
*/
#define for_each_reserved_mem_region(i, p_start, p_end) \
- for (i = 0UL, \
- __next_reserved_mem_region(&i, p_start, p_end); \
+ for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \
i != (u64)ULLONG_MAX; \
__next_reserved_mem_region(&i, p_start, p_end))
@@ -333,6 +332,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
@@ -403,15 +403,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
}
#define for_each_memblock(memblock_type, region) \
- for (region = memblock.memblock_type.regions; \
+ for (region = memblock.memblock_type.regions; \
region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
region++)
#define for_each_memblock_type(memblock_type, rgn) \
- idx = 0; \
- rgn = &memblock_type->regions[idx]; \
- for (idx = 0; idx < memblock_type->cnt; \
- idx++,rgn = &memblock_type->regions[idx])
+ for (idx = 0, rgn = &memblock_type->regions[0]; \
+ idx < memblock_type->cnt; \
+ idx++, rgn = &memblock_type->regions[idx])
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
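
The for_each_memblock_type() rewrite above matters because the old expansion placed two statements before the for loop, so the macro was not a single statement. Illustration:

/* With the old expansion, only "idx = 0;" landed under the if:
 *
 *	if (need_scan)
 *		for_each_memblock_type(type, rgn)
 *			do_something(rgn);
 *
 * expanded roughly to:
 *
 *	if (need_scan)
 *		idx = 0;
 *	rgn = &type->regions[idx];
 *	for (idx = 0; idx < type->cnt; ...)
 *		do_something(rgn);
 *
 * The new form is one for statement and behaves as a caller expects.
 */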
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 56e6069d2..5d8ca6e02 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
- MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_SOCK,
@@ -60,7 +60,7 @@ enum mem_cgroup_stat_index {
};
struct mem_cgroup_reclaim_cookie {
- struct zone *zone;
+ pg_data_t *pgdat;
int priority;
unsigned int generation;
};
@@ -118,7 +118,7 @@ struct mem_cgroup_reclaim_iter {
/*
* per-zone information in memory controller.
*/
-struct mem_cgroup_per_zone {
+struct mem_cgroup_per_node {
struct lruvec lruvec;
unsigned long lru_size[NR_LRU_LISTS];
@@ -132,10 +132,6 @@ struct mem_cgroup_per_zone {
/* use container_of */
};
-struct mem_cgroup_per_node {
- struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
-};
-
struct mem_cgroup_threshold {
struct eventfd_ctx *eventfd;
unsigned long threshold;
@@ -314,8 +310,46 @@ void mem_cgroup_uncharge_list(struct list_head *page_list);
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
-struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
+static struct mem_cgroup_per_node *
+mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
+{
+ return memcg->nodeinfo[nid];
+}
+
+/**
+ * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
+ * @pgdat: pglist_data of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
+ *
+ * Returns the lru list vector holding pages for the given @pgdat and
+ * @memcg combination. This can be the node lruvec, if the memory
+ * controller is disabled.
+ */
+static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
+ struct mem_cgroup *memcg)
+{
+ struct mem_cgroup_per_node *mz;
+ struct lruvec *lruvec;
+
+ if (mem_cgroup_disabled()) {
+ lruvec = node_lruvec(pgdat);
+ goto out;
+ }
+
+ mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+ lruvec = &mz->lruvec;
+out:
+ /*
+ * Since a node can be onlined after the mem_cgroup was created,
+ * we have to be prepared to initialize lruvec->pgdat here;
+ * and if offlined then reonlined, we need to reinitialize it.
+ */
+ if (unlikely(lruvec->pgdat != pgdat))
+ lruvec->pgdat = pgdat;
+ return lruvec;
+}
+
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -404,9 +438,9 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
- struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_per_node *mz;
- mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
return mz->lru_size[lru];
}
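
An illustrative caller of the node-based lookup (sketch; the names are made up, and reclaim code would pass the pgdat it is scanning):

/* Sketch: size up one LRU list of a memcg on a given node. */
static unsigned long example_lru_count(struct pglist_data *pgdat,
				       struct mem_cgroup *memcg)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);

	return mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
}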
@@ -477,7 +511,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
mem_cgroup_update_page_stat(page, idx, -1);
}
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
@@ -568,16 +602,16 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
-static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
+ struct mem_cgroup *memcg)
{
- return &zone->lruvec;
+ return node_lruvec(pgdat);
}
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct zone *zone)
+ struct pglist_data *pgdat)
{
- return &zone->lruvec;
+ return &pgdat->lruvec;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
@@ -681,7 +715,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
}
static inline
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
@@ -749,6 +783,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
}
#endif
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+void memcg_kmem_put_cache(struct kmem_cache *cachep);
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+ struct mem_cgroup *memcg);
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void memcg_kmem_uncharge(struct page *page, int order);
+
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;
@@ -770,22 +811,6 @@ static inline bool memcg_kmem_enabled(void)
}
/*
- * In general, we'll do everything in our power to not incur in any overhead
- * for non-memcg users for the kmem functions. Not even a function call, if we
- * can avoid it.
- *
- * Therefore, we'll inline all those functions so that in the best case, we'll
- * see that kmemcg is off for everybody and proceed quickly. If it is on,
- * we'll still do most of the flag checking inline. We check a lot of
- * conditions, but because they are pretty simple, they are expected to be
- * fast.
- */
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-
-/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
* will return -1 when this is not a kmem-limited memcg.
@@ -795,67 +820,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-
-static inline bool __memcg_kmem_bypass(void)
-{
- if (!memcg_kmem_enabled())
- return true;
- if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
- return true;
- return false;
-}
-
-/**
- * memcg_kmem_charge: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
- *
- * Returns 0 on success, an error code on failure.
- */
-static __always_inline int memcg_kmem_charge(struct page *page,
- gfp_t gfp, int order)
-{
- if (__memcg_kmem_bypass())
- return 0;
- if (!(gfp & __GFP_ACCOUNT))
- return 0;
- return __memcg_kmem_charge(page, gfp, order);
-}
-
-/**
- * memcg_kmem_uncharge: uncharge a kmem page
- * @page: page to uncharge
- * @order: allocation order
- */
-static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
-{
- if (memcg_kmem_enabled())
- __memcg_kmem_uncharge(page, order);
-}
-
-/**
- * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
- * @cachep: the original global kmem cache
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
- */
-static __always_inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
- if (__memcg_kmem_bypass())
- return cachep;
- return __memcg_kmem_get_cache(cachep, gfp);
-}
-
-static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
- if (memcg_kmem_enabled())
- __memcg_kmem_put_cache(cachep);
-}
-
/**
* memcg_kmem_update_page_stat - update kmem page state statistics
* @page: the page
@@ -878,15 +842,6 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
-{
- return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-}
-
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
return -1;
@@ -900,16 +855,6 @@ static inline void memcg_put_cache_ids(void)
{
}
-static inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
- return cachep;
-}
-
-static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-}
-
static inline void memcg_kmem_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 5145620ba..01033fade 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -284,5 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
+extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index bcaa63413..93416196b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,7 +26,7 @@ struct vmem_altmap {
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
-#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE)
+#ifdef CONFIG_ZONE_DEVICE
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index d409ceb22..c118a7ec9 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -350,7 +350,7 @@ static inline int pm80x_dev_suspend(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- set_bit((1 << irq), &chip->wu_flag);
+ set_bit(irq, &chip->wu_flag);
return 0;
}
@@ -362,7 +362,7 @@ static inline int pm80x_dev_resume(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- clear_bit((1 << irq), &chip->wu_flag);
+ clear_bit(irq, &chip->wu_flag);
return 0;
}
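A note on the fix above: set_bit() and clear_bit() take a bit *number*, not a mask, so the old code operated on bit (1 << irq) instead of bit irq. A minimal userspace sketch of the difference, using a simplified stand-in for the kernel bitop and assuming a 64-bit unsigned long:

#include <stdio.h>

/* Simplified stand-in for the kernel's set_bit(): it takes a bit
 * NUMBER and derives the mask itself. */
static void set_bit(unsigned int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

int main(void)
{
	unsigned long wu_flag = 0;
	unsigned int irq = 5;

	set_bit(irq, &wu_flag);      /* correct: sets bit 5, mask 0x20 */
	printf("%#lx\n", wu_flag);

	wu_flag = 0;
	set_bit(1 << irq, &wu_flag); /* old bug: sets bit 32, mask 0x100000000 */
	printf("%#lx\n", wu_flag);
	return 0;
}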
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 689312745..01024d1ae 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -37,12 +37,6 @@ static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
return ab8500_sysctrl_write(reg, bits, 0);
}
-/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
-struct ab8500_sysctrl_platform_data {
- u8 initial_req_buf_config[8];
- u16 (*reboot_reason_code)(const char *cmd);
-};
-
/* Registers */
#define AB8500_TURNONSTATUS 0x100
#define AB8500_RESETSTATUS 0x101
diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h
new file mode 100644
index 000000000..45a5e6e7d
--- /dev/null
+++ b/include/linux/mfd/altera-a10sr.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Declarations for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from DA9052
+ */
+
+#ifndef __MFD_ALTERA_A10SR_H
+#define __MFD_ALTERA_A10SR_H
+
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Write registers are always on even addresses */
+#define WRITE_REG_MASK 0xFE
+/* Odd registers are always on odd addresses */
+#define READ_REG_MASK 0x01
+
+#define ALTR_A10SR_BITS_PER_REGISTER 8
+/*
+ * To find the correct register, we divide the input GPIO by
+ * the number of GPIOs in each register. We then need to multiply
+ * by 2 because the reads are at odd addresses.
+ */
+#define ALTR_A10SR_REG_OFFSET(X) (((X) / ALTR_A10SR_BITS_PER_REGISTER) << 1)
+#define ALTR_A10SR_REG_BIT(X) ((X) % ALTR_A10SR_BITS_PER_REGISTER)
+#define ALTR_A10SR_REG_BIT_CHG(X, Y) ((X) << ALTR_A10SR_REG_BIT(Y))
+#define ALTR_A10SR_REG_BIT_MASK(X) (1 << ALTR_A10SR_REG_BIT(X))
+
+/* Arria10 System Controller Register Defines */
+#define ALTR_A10SR_NOP 0x00 /* No Change */
+#define ALTR_A10SR_VERSION_READ 0x00 /* MAX5 Version Read */
+
+#define ALTR_A10SR_LED_REG 0x02 /* LED - Upper 4 bits */
+/* LED register Bit Definitions */
+#define ALTR_A10SR_LED_VALID_SHIFT 4 /* LED - Upper 4 bits valid */
+#define ALTR_A10SR_OUT_VALID_RANGE_LO ALTR_A10SR_LED_VALID_SHIFT
+#define ALTR_A10SR_OUT_VALID_RANGE_HI 7
+
+#define ALTR_A10SR_PBDSW_REG 0x04 /* PB & DIP SW - Input only */
+#define ALTR_A10SR_PBDSW_IRQ_REG 0x06 /* PB & DIP SW Flag Clear */
+/* Pushbutton & DIP Switch Bit Definitions */
+#define ALTR_A10SR_IN_VALID_RANGE_LO 8
+#define ALTR_A10SR_IN_VALID_RANGE_HI 15
+
+#define ALTR_A10SR_PWR_GOOD1_REG 0x08 /* Power Good1 Read */
+#define ALTR_A10SR_PWR_GOOD2_REG 0x0A /* Power Good2 Read */
+#define ALTR_A10SR_PWR_GOOD3_REG 0x0C /* Power Good3 Read */
+#define ALTR_A10SR_FMCAB_REG 0x0E /* FMCA/B & PCIe Pwr Enable */
+#define ALTR_A10SR_HPS_RST_REG 0x10 /* HPS Reset */
+#define ALTR_A10SR_USB_QSPI_REG 0x12 /* USB, BQSPI, FILE Reset */
+#define ALTR_A10SR_SFPA_REG 0x14 /* SFPA Control Reg */
+#define ALTR_A10SR_SFPB_REG 0x16 /* SFPB Control Reg */
+#define ALTR_A10SR_I2C_M_REG 0x18 /* I2C Master Select */
+#define ALTR_A10SR_WARM_RST_REG 0x1A /* HPS Warm Reset */
+#define ALTR_A10SR_WR_KEY_REG 0x1C /* HPS Warm Reset Key */
+#define ALTR_A10SR_PMBUS_REG 0x1E /* HPS PM Bus */
+
+/**
+ * struct altr_a10sr - Altera Max5 MFD device private data structure
+ * @dev: this device
+ * @regmap: the regmap assigned to the parent device.
+ */
+struct altr_a10sr {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* __MFD_ALTERA_A10SR_H */
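To make the offset arithmetic above concrete: for GPIO 10 the macros select byte offset 2 and bit 2. A standalone sketch, with the values following directly from the definitions above:

#include <stdio.h>

#define ALTR_A10SR_BITS_PER_REGISTER 8
#define ALTR_A10SR_REG_OFFSET(X) (((X) / ALTR_A10SR_BITS_PER_REGISTER) << 1)
#define ALTR_A10SR_REG_BIT(X) ((X) % ALTR_A10SR_BITS_PER_REGISTER)
#define ALTR_A10SR_REG_BIT_MASK(X) (1 << ALTR_A10SR_REG_BIT(X))

int main(void)
{
	unsigned int gpio = 10;

	/* (10 / 8) << 1 = 2, 10 % 8 = 2, 1 << 2 = 0x4 */
	printf("offset=%d bit=%d mask=%#x\n",
	       ALTR_A10SR_REG_OFFSET(gpio),
	       ALTR_A10SR_REG_BIT(gpio),
	       ALTR_A10SR_REG_BIT_MASK(gpio));
	return 0;
}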
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index d55a42297..58ab4c0fe 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -14,6 +14,7 @@
#define _WM_ARIZONA_CORE_H
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/arizona/pdata.h>
@@ -148,8 +149,17 @@ struct arizona {
uint16_t dac_comp_coeff;
uint8_t dac_comp_enabled;
struct mutex dac_comp_lock;
+
+ struct blocking_notifier_head notifier;
};
+static inline int arizona_call_notifiers(struct arizona *arizona,
+ unsigned long event,
+ void *data)
+{
+ return blocking_notifier_call_chain(&arizona->notifier, event, data);
+}
+
int arizona_clk32k_enable(struct arizona *arizona);
int arizona_clk32k_disable(struct arizona *arizona);
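The notifier head added above gives the MFD core a broadcast channel to its child drivers, with arizona_call_notifiers() as the send side. A consumer registers roughly as below (a sketch; the my_* names are illustrative and the caller is assumed to hold a valid struct arizona):

#include <linux/mfd/arizona/core.h>
#include <linux/notifier.h>

static int my_arizona_event(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	/* react to whatever arizona_call_notifiers() broadcast */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_arizona_event,
};

/* e.g. from a child driver's probe: */
static int my_probe_step(struct arizona *arizona)
{
	return blocking_notifier_chain_register(&arizona->notifier, &my_nb);
}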
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index cd7e78eae..0d06c5d0a 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -856,12 +856,6 @@
#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
-#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
-#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
-#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
-#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
-#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
-#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
#define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
#define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 13b630c10..7e7a8d4b4 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -949,6 +949,37 @@ struct ec_params_pwm_set_fan_duty {
uint32_t percent;
} __packed;
+#define EC_CMD_PWM_SET_DUTY 0x25
+/* 16 bit duty cycle, 0xffff = 100% */
+#define EC_PWM_MAX_DUTY 0xffff
+
+enum ec_pwm_type {
+ /* All types, indexed by board-specific enum pwm_channel */
+ EC_PWM_TYPE_GENERIC = 0,
+ /* Keyboard backlight */
+ EC_PWM_TYPE_KB_LIGHT,
+ /* Display backlight */
+ EC_PWM_TYPE_DISPLAY_LIGHT,
+ EC_PWM_TYPE_COUNT,
+};
+
+struct ec_params_pwm_set_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __packed;
+
+#define EC_CMD_PWM_GET_DUTY 0x26
+
+struct ec_params_pwm_get_duty {
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __packed;
+
+struct ec_response_pwm_get_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+} __packed;
+
/*****************************************************************************/
/*
* Lightbar commands. This looks worse than it is. Since we only use one HOST
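The new duty-cycle commands use a 16-bit scale where EC_PWM_MAX_DUTY (0xffff) means 100%, unlike the percent-based fan command above. A sketch of the scaling a caller performs; the helper is illustrative, not part of the EC protocol:

#include <stdint.h>
#include <stdio.h>

#define EC_PWM_MAX_DUTY 0xffff

/* Illustrative helper: scale a percentage into the EC's 16-bit duty range. */
static uint16_t percent_to_duty(unsigned int percent)
{
	return (uint16_t)((percent * EC_PWM_MAX_DUTY) / 100);
}

int main(void)
{
	printf("%u%% -> %#x\n", 50u, percent_to_duty(50));   /* 0x7fff */
	printf("%u%% -> %#x\n", 100u, percent_to_duty(100)); /* 0xffff */
	return 0;
}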
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644
index 000000000..304985e28
--- /dev/null
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n) ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n) ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n) ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n) ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD BIT(16)
+#define CFGCHIP1_HPIENA BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC BIT(12)
+#define CFGCHIP1_AMUTESEL0(n) ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD BIT(17)
+#define CFGCHIP2_VBUSSENSE BIT(16)
+#define CFGCHIP2_RESET BIT(15)
+#define CFGCHIP2_OTGMODE(n) ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX BIT(11)
+#define CFGCHIP2_PHYPWRDN BIT(10)
+#define CFGCHIP2_OTGPWRDN BIT(9)
+#define CFGCHIP2_DATPOL BIT(8)
+#define CFGCHIP2_USB1SUSPENDM BIT(7)
+#define CFGCHIP2_PHY_PLLON BIT(6)
+#define CFGCHIP2_SESENDEN BIT(5)
+#define CFGCHIP2_VBDTCTEN BIT(4)
+#define CFGCHIP2_REFFREQ(n) ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
+#define CFGCHIP3_PRUEVTSEL BIT(3)
+#define CFGCHIP3_DIV45PENA BIT(2)
+#define CFGCHIP3_EMA_CLKSRC BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0 BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index bf5109d38..5d3746014 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -178,16 +178,6 @@ enum ddr_pwrst {
#define DB8500_PRCMU_LEGACY_OFFSET 0xDD4
-struct prcmu_pdata
-{
- bool enable_set_ddr_opp;
- bool enable_ape_opp_100_voltage;
- struct ab8500_platform_data *ab_platdata;
- u32 version_offset;
- u32 legacy_offset;
- u32 adt_offset;
-};
-
#define PRCMU_FW_PROJECT_U8500 2
#define PRCMU_FW_PROJECT_U8400 3
#define PRCMU_FW_PROJECT_U9500 4 /* Customer specific */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index dbbe9a644..62f03c2b1 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -34,14 +34,23 @@
#define PMU_VER_START 0x10
#define PMU_VER_END 0x38
-#define RESERVE_INT BIT(7)
-#define PWRON_D20R_INT BIT(6)
-#define PWRON_D20F_INT BIT(5)
-#define PWRON_D4SR_INT BIT(4)
-#define VSYS_6P0_D200UR_INT BIT(3)
-#define VSYS_UV_D3R_INT BIT(2)
-#define VSYS_2P5_R_INT BIT(1)
-#define OTMP_D1R_INT BIT(0)
+#define RESERVE_INT 7
+#define PWRON_D20R_INT 6
+#define PWRON_D20F_INT 5
+#define PWRON_D4SR_INT 4
+#define VSYS_6P0_D200UR_INT 3
+#define VSYS_UV_D3R_INT 2
+#define VSYS_2P5_R_INT 1
+#define OTMP_D1R_INT 0
+
+#define RESERVE_INT_MASK BIT(RESERVE_INT)
+#define PWRON_D20R_INT_MASK BIT(PWRON_D20R_INT)
+#define PWRON_D20F_INT_MASK BIT(PWRON_D20F_INT)
+#define PWRON_D4SR_INT_MASK BIT(PWRON_D4SR_INT)
+#define VSYS_6P0_D200UR_INT_MASK BIT(VSYS_6P0_D200UR_INT)
+#define VSYS_UV_D3R_INT_MASK BIT(VSYS_UV_D3R_INT)
+#define VSYS_2P5_R_INT_MASK BIT(VSYS_2P5_R_INT)
+#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
struct resource *res;
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index c72d5344f..cadc65439 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -20,6 +20,7 @@
#define RN5T618_OTPVER 0x01
#define RN5T618_IODAC 0x02
#define RN5T618_VINDAC 0x03
+#define RN5T618_OUT32KEN 0x05
#define RN5T618_CPUCNT 0x06
#define RN5T618_PSWR 0x07
#define RN5T618_PONHIS 0x09
@@ -38,6 +39,7 @@
#define RN5T618_DC1_SLOT 0x16
#define RN5T618_DC2_SLOT 0x17
#define RN5T618_DC3_SLOT 0x18
+#define RN5T618_DC4_SLOT 0x19
#define RN5T618_LDO1_SLOT 0x1b
#define RN5T618_LDO2_SLOT 0x1c
#define RN5T618_LDO3_SLOT 0x1d
@@ -54,12 +56,16 @@
#define RN5T618_DC2CTL2 0x2f
#define RN5T618_DC3CTL 0x30
#define RN5T618_DC3CTL2 0x31
+#define RN5T618_DC4CTL 0x32
+#define RN5T618_DC4CTL2 0x33
#define RN5T618_DC1DAC 0x36
#define RN5T618_DC2DAC 0x37
#define RN5T618_DC3DAC 0x38
+#define RN5T618_DC4DAC 0x39
#define RN5T618_DC1DAC_SLP 0x3b
#define RN5T618_DC2DAC_SLP 0x3c
#define RN5T618_DC3DAC_SLP 0x3d
+#define RN5T618_DC4DAC_SLP 0x3e
#define RN5T618_DCIREN 0x40
#define RN5T618_DCIRQ 0x41
#define RN5T618_DCIRMON 0x42
@@ -211,6 +217,7 @@ enum {
RN5T618_DCDC1,
RN5T618_DCDC2,
RN5T618_DCDC3,
+ RN5T618_DCDC4,
RN5T618_LDO1,
RN5T618_LDO2,
RN5T618_LDO3,
@@ -221,8 +228,14 @@ enum {
RN5T618_REG_NUM,
};
+enum {
+ RN5T567 = 0,
+ RN5T618,
+};
+
struct rn5t618 {
struct regmap *regmap;
+ long variant;
};
#endif /* __LINUX_MFD_RN5T618_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index cb8388391..de748bc75 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -62,6 +62,7 @@ enum {
struct stmpe_variant_info;
struct stmpe_client_info;
+struct stmpe_platform_data;
/**
* struct stmpe - STMPE MFD structure
@@ -117,25 +118,4 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
-/**
- * struct stmpe_platform_data - STMPE platform data
- * @id: device id to distinguish between multiple STMPEs on the same board
- * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
- * @irq_trigger: IRQ trigger to use for the interrupt to the host
- * @autosleep: bool to enable/disable stmpe autosleep
- * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
- * @irq_over_gpio: true if gpio is used to get irq
- * @irq_gpio: gpio number over which irq will be requested (significant only if
- * irq_over_gpio is true)
- */
-struct stmpe_platform_data {
- int id;
- unsigned int blocks;
- unsigned int irq_trigger;
- bool autosleep;
- bool irq_over_gpio;
- int irq_gpio;
- int autosleep_timeout;
-};
-
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 175c82699..7f55b8b41 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -153,7 +153,7 @@
struct ti_tscadc_dev {
struct device *dev;
- struct regmap *regmap_tscadc;
+ struct regmap *regmap;
void __iomem *tscadc_base;
int irq;
int used_cells; /* 1-2 */
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index ac7fba44d..1c8823149 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -257,6 +257,7 @@ struct tps65217 {
unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
+ u8 *strobes;
};
static inline struct tps65217 *dev_to_tps65217(struct device *dev)
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index d58f3b5f5..7fdf5326f 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -246,6 +246,7 @@ enum tps65218_irqs {
* @name: Voltage regulator name
* @min_uV: minimum micro volts
* @max_uV: maximum micro volts
+ * @strobe: sequencing strobe value for the regulator
*
* This data is used to check the regulator voltage limits while setting.
*/
@@ -254,6 +255,7 @@ struct tps_info {
const char *name;
int min_uV;
int max_uV;
+ int strobe;
};
/**
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8e95cd87c..36795a1be 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -226,6 +226,7 @@ struct twl6040 {
struct regmap_irq_chip_data *irq_data;
struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct clk *clk32k;
+ struct clk *mclk;
struct mutex mutex;
struct mutex irq_mutex;
struct mfd_cell cells[TWL6040_CELLS];
@@ -237,8 +238,8 @@ struct twl6040 {
/* PLL configuration */
int pll;
- unsigned int sysclk;
- unsigned int mclk;
+ unsigned int sysclk_rate;
+ unsigned int mclk_rate;
unsigned int irq;
unsigned int irq_ready;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 2e5b194b9..257173e00 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,6 +37,7 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
+#define MICREL_PHY_FXEN 0x00000002
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9b50325e4..ae8d475a9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -37,6 +37,8 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
extern int migrate_prep_local(void);
@@ -69,6 +71,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_COMPACTION
+extern int PageMovable(struct page *page);
+extern void __SetPageMovable(struct page *page, struct address_space *mapping);
+extern void __ClearPageMovable(struct page *page);
+#else
+static inline int PageMovable(struct page *page) { return 0; }
+static inline void __SetPageMovable(struct page *page,
+ struct address_space *mapping)
+{
+}
+static inline void __ClearPageMovable(struct page *page)
+{
+}
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
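These hooks back non-LRU movable page migration: a driver opts pages in with __SetPageMovable() against a mapping whose address_space_operations provide the isolate/migrate/putback callbacks, after which compaction may call isolate_movable_page() and putback_movable_page() on them. A rough sketch of the driver side (the my_* names are illustrative):

#include <linux/migrate.h>

/* Sketch: mapping->a_ops is assumed to provide ->isolate_page,
 * ->migratepage and ->putback_page for these pages. */
static void my_track_page(struct page *page, struct address_space *mapping)
{
	__SetPageMovable(page, mapping);	/* opt the page in to compaction */
}

static void my_release_page(struct page *page)
{
	__ClearPageMovable(page);		/* opt out again before freeing */
}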
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d46a0e7f1..42da3552f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -220,6 +220,7 @@ enum {
MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
+ MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
};
enum {
@@ -536,6 +537,7 @@ struct mlx4_caps {
int max_rq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
+ int max_tc_eth;
u32 *qp0_qkey;
u32 *qp0_proxy;
u32 *qp1_proxy;
@@ -1341,6 +1343,9 @@ enum {
VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
};
+enum {
+ MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
+};
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
@@ -1381,6 +1386,9 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+ const u32 offset[], u32 value[],
+ size_t array_len, u8 port);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
@@ -1495,6 +1503,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
u16 offset, u16 size, u8 *data);
+int mlx4_max_tc(struct mlx4_dev *dev);
/* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 587cdf943..deaa22172 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -291,16 +291,18 @@ enum {
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
+union mlx4_wqe_qpn_vlan {
+ struct {
+ __be16 vlan_tag;
+ u8 ins_vlan;
+ u8 fence_size;
+ };
+ __be32 bf_qpn;
+};
+
struct mlx4_wqe_ctrl_seg {
__be32 owner_opcode;
- union {
- struct {
- __be16 vlan_tag;
- u8 ins_vlan;
- u8 fence_size;
- };
- __be32 bf_qpn;
- };
+ union mlx4_wqe_qpn_vlan qpn_vlan;
/*
* High 24 bits are SRC remote buffer; low 8 bits are flags:
* [7] SO (strong ordering)
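With the union named, writers reach the fields through the qpn_vlan member, and other WQE layouts can reuse the same overlay. A sketch of a converted call site (fence size in 16-byte units is the usual mlx4 convention, assumed here):

#include <linux/mlx4/qp.h>

/* Sketch: the anonymous-union access ctrl->fence_size becomes
 * ctrl->qpn_vlan.fence_size after this change. */
static void set_ctrl_fence(struct mlx4_wqe_ctrl_seg *ctrl, int wqe_size)
{
	ctrl->qpn_vlan.fence_size = wqe_size / 16;
}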
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2be976dd4..2566f6d64 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -58,6 +58,8 @@ struct mlx5_core_cq {
void (*comp)(struct mlx5_core_cq *);
void *priv;
} tasklet_ctx;
+ int reset_notify_added;
+ struct list_head reset_notify;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 73a484798..0b6d15cdd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -129,6 +129,13 @@ __mlx5_mask(typ, fld))
tmp; \
})
+enum mlx5_inline_modes {
+ MLX5_INLINE_MODE_NONE,
+ MLX5_INLINE_MODE_L2,
+ MLX5_INLINE_MODE_IP,
+ MLX5_INLINE_MODE_TCP_UDP,
+};
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -1330,6 +1337,7 @@ enum mlx5_cap_type {
MLX5_CAP_ESWITCH,
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
+ MLX5_CAP_QOS,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1414,6 +1422,9 @@ enum mlx5_cap_type {
MLX5_GET(vector_calc_cap, \
mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+#define MLX5_CAP_QOS(mdev, cap)\
+ MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index fd72ecf0c..ccea6fb16 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -46,6 +46,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
+#include <linux/mlx5/srq.h>
enum {
MLX5_RQ_BITMASK_VSD = 1 << 1,
@@ -469,7 +470,7 @@ struct mlx5_irq_info {
};
struct mlx5_fc_stats {
- struct list_head list;
+ struct rb_root counters;
struct list_head addlist;
/* protect addlist add/splice operations */
spinlock_t addlist_lock;
@@ -481,6 +482,21 @@ struct mlx5_fc_stats {
struct mlx5_eswitch;
+struct mlx5_rl_entry {
+ u32 rate;
+ u16 index;
+ u16 refcount;
+};
+
+struct mlx5_rl_table {
+ /* protect rate limit table */
+ struct mutex rl_lock;
+ u16 max_size;
+ u32 max_rate;
+ u32 min_rate;
+ struct mlx5_rl_entry *rl_entry;
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -535,15 +551,12 @@ struct mlx5_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
unsigned long pci_dev_data;
- struct mlx5_flow_root_namespace *root_ns;
- struct mlx5_flow_root_namespace *fdb_root_ns;
- struct mlx5_flow_root_namespace *esw_egress_root_ns;
- struct mlx5_flow_root_namespace *esw_ingress_root_ns;
-
struct mlx5_fc_stats fc_stats;
+ struct mlx5_rl_table rl_table;
};
enum mlx5_device_state {
@@ -562,6 +575,18 @@ enum mlx5_pci_status {
MLX5_PCI_STATUS_ENABLED,
};
+struct mlx5_td {
+ struct list_head tirs_list;
+ u32 tdn;
+};
+
+struct mlx5e_resources {
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ struct mlx5_td td;
+ struct mlx5_core_mkey mkey;
+};
+
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@@ -586,6 +611,7 @@ struct mlx5_core_dev {
struct mlx5_profile *profile;
atomic_t num_qps;
u32 issi;
+ struct mlx5e_resources mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
@@ -773,11 +799,10 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen,
- int is_xrc);
+ struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out);
+ struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
@@ -862,6 +887,12 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u8 port_num, void *out, size_t sz);
+int mlx5_init_rl_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->initializing) >> 31;
@@ -939,6 +970,11 @@ static inline int mlx5_get_gid_table_len(u16 param)
return 8 * (1 << param);
}
+static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
+{
+ return !!(dev->priv.rl_table.max_size);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
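The rate-limit entries are refcounted: mlx5_rl_add_rate() returns the table index that gets programmed into a send queue (see packet_pacing_rate_limit_index in mlx5_ifc.h below), and mlx5_rl_remove_rate() drops the reference. A hedged usage sketch:

#include <linux/mlx5/driver.h>

/* Sketch: reserve a packet-pacing rate, use the returned table index,
 * then drop the reference again. */
static int use_rate_limit(struct mlx5_core_dev *dev, u32 rate)
{
	u16 index;
	int err;

	if (!mlx5_rl_is_supported(dev) || !mlx5_rl_is_in_range(dev, rate))
		return -EOPNOTSUPP;

	err = mlx5_rl_add_rate(dev, rate, &index);	/* takes a table ref */
	if (err)
		return err;

	/* ... program index as the SQ's packet_pacing_rate_limit_index ... */

	mlx5_rl_remove_rate(dev, rate);			/* drops the ref */
	return 0;
}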
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4b7a107d9..e036d6030 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,8 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_OFFLOADS,
+ MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
@@ -67,6 +69,12 @@ struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
+struct mlx5_flow_spec {
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+ u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -115,9 +123,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
*/
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest);
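mlx5_flow_spec bundles the three match arguments that mlx5_add_flow_rule() previously took separately, so callers fill one zeroed spec. A sketch, assuming the usual mlx5 constants (MLX5_MATCH_OUTER_HEADERS, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG) are in scope:

#include <linux/mlx5/fs.h>
#include <linux/slab.h>

/* Sketch: one spec replaces the criteria-enable/criteria/value triple. */
static struct mlx5_flow_rule *
add_outer_header_rule(struct mlx5_flow_table *ft,
		      struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_rule *rule;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	/* ... set the relevant spec->match_criteria / match_value bits ... */

	rule = mlx5_add_flow_rule(ft, spec,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, dest);
	kfree(spec);
	return rule;
}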
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index e955a2859..d1f9a581a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -123,6 +123,10 @@ enum {
MLX5_CMD_OP_DRAIN_DCT = 0x712,
MLX5_CMD_OP_QUERY_DCT = 0x713,
MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLX5_CMD_OP_CREATE_XRQ = 0x717,
+ MLX5_CMD_OP_DESTROY_XRQ = 0x718,
+ MLX5_CMD_OP_QUERY_XRQ = 0x719,
+ MLX5_CMD_OP_ARM_XRQ = 0x71a,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -139,6 +143,8 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
+ MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ALLOC_UAR = 0x802,
@@ -362,7 +368,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
};
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_at_0[0x20];
+ u8 reserved_at_0[0x8];
+ u8 source_sqn[0x18];
u8 reserved_at_20[0x10];
u8 source_port[0x10];
@@ -508,6 +515,17 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 reserved_at_20[0x7e0];
};
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 reserved_at_1[0x1f];
+ u8 reserved_at_20[0x20];
+ u8 packet_pacing_max_rate[0x20];
+ u8 packet_pacing_min_rate[0x20];
+ u8 reserved_at_80[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -518,7 +536,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 self_lb_en_modifiable[0x1];
u8 reserved_at_9[0x2];
u8 max_lso_cap[0x5];
- u8 reserved_at_10[0x4];
+ u8 reserved_at_10[0x2];
+ u8 wqe_inline_mode[0x2];
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
u8 scatter_fcs[0x1];
@@ -747,7 +766,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
- u8 reserved_at_182[0x4];
+ u8 retransmission_q_counters[0x1];
+ u8 reserved_at_183[0x3];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -774,7 +794,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1d0[0x6];
+ u8 reserved_at_1d0[0x1];
+ u8 dcbx[0x1];
+ u8 reserved_at_1d2[0x4];
u8 rol_s[0x1];
u8 rol_g[0x1];
u8 reserved_at_1d8[0x1];
@@ -806,7 +828,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 tph[0x1];
u8 rf[0x1];
u8 dct[0x1];
- u8 reserved_at_21b[0x1];
+ u8 qos[0x1];
u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
@@ -872,7 +894,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x20];
+ u8 reserved_at_340[0x8];
+ u8 log_max_flow_counter_bulk[0x8];
+ u8 max_flow_counter[0x10];
+
u8 reserved_at_360[0x3];
u8 log_max_rq[0x5];
@@ -932,7 +957,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x220];
+ u8 reserved_at_5e0[0x10];
+ u8 tag_matching[0x1];
+ u8 rndv_offload_rc[0x1];
+ u8 rndv_offload_dc[0x1];
+ u8 log_tag_matching_list_sz[0x5];
+ u8 reserved_at_5e8[0x3];
+ u8 log_max_xrq[0x5];
+
+ u8 reserved_at_5f0[0x200];
};
enum mlx5_flow_destination_type {
@@ -951,7 +984,8 @@ struct mlx5_ifc_dest_format_struct_bits {
};
struct mlx5_ifc_flow_counter_list_bits {
- u8 reserved_at_0[0x10];
+ u8 clear[0x1];
+ u8 num_of_counters[0xf];
u8 flow_counter_id[0x10];
u8 reserved_at_20[0x20];
@@ -1970,7 +2004,7 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_560[0x5];
u8 rq_type[0x3];
- u8 srqn_rmpn[0x18];
+ u8 srqn_rmpn_xrqn[0x18];
u8 reserved_at_580[0x8];
u8 rmsn[0x18];
@@ -2021,6 +2055,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
+ struct mlx5_ifc_qos_cap_bits qos_cap;
u8 reserved_at_0[0x8000];
};
@@ -2236,7 +2271,8 @@ struct mlx5_ifc_sqc_bits {
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_4[0x4];
+ u8 reserved_at_4[0x1];
+ u8 min_wqe_inline_mode[0x3];
u8 state[0x4];
u8 reg_umr[0x1];
u8 reserved_at_d[0x13];
@@ -2247,8 +2283,9 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_at_60[0xa0];
+ u8 reserved_at_60[0x90];
+ u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
u8 reserved_at_110[0x10];
@@ -2332,7 +2369,9 @@ struct mlx5_ifc_rmpc_bits {
};
struct mlx5_ifc_nic_vport_context_bits {
- u8 reserved_at_0[0x1f];
+ u8 reserved_at_0[0x5];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_at_8[0x17];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@@ -2596,7 +2635,7 @@ struct mlx5_ifc_dctc_bits {
u8 reserved_at_98[0x8];
u8 reserved_at_a0[0x8];
- u8 srqn[0x18];
+ u8 srqn_xrqn[0x18];
u8 reserved_at_c0[0x8];
u8 pd[0x18];
@@ -2648,6 +2687,7 @@ enum {
enum {
MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ MLX5_CQ_PERIOD_NUM_MODES
};
struct mlx5_ifc_cqc_bits {
@@ -2725,6 +2765,54 @@ struct mlx5_ifc_query_adapter_param_block_bits {
u8 vsd_contd_psid[16][0x8];
};
+enum {
+ MLX5_XRQC_STATE_GOOD = 0x0,
+ MLX5_XRQC_STATE_ERROR = 0x1,
+};
+
+enum {
+ MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0,
+ MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1,
+};
+
+enum {
+ MLX5_XRQC_OFFLOAD_RNDV = 0x1,
+};
+
+struct mlx5_ifc_tag_matching_topology_context_bits {
+ u8 log_matching_list_sz[0x4];
+ u8 reserved_at_4[0xc];
+ u8 append_next_index[0x10];
+
+ u8 sw_phase_cnt[0x10];
+ u8 hw_phase_cnt[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_xrqc_bits {
+ u8 state[0x4];
+ u8 rlkey[0x1];
+ u8 reserved_at_5[0xf];
+ u8 topology[0x4];
+ u8 reserved_at_18[0x4];
+ u8 offload[0x4];
+
+ u8 reserved_at_20[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_at_60[0xa0];
+
+ struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
+
+ u8 reserved_at_180[0x180];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
struct mlx5_ifc_modify_field_select_bits modify_field_select;
struct mlx5_ifc_resize_field_select_bits resize_field_select;
@@ -3147,6 +3235,30 @@ struct mlx5_ifc_rst2init_qp_in_bits {
u8 reserved_at_800[0x80];
};
+struct mlx5_ifc_query_xrq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_xrqc_bits xrq_context;
+};
+
+struct mlx5_ifc_query_xrq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 xrqn[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3550,7 +3662,27 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 out_of_sequence[0x20];
- u8 reserved_at_1e0[0x620];
+ u8 reserved_at_1e0[0x20];
+
+ u8 duplicate_request[0x20];
+
+ u8 reserved_at_220[0x20];
+
+ u8 rnr_nak_retry_err[0x20];
+
+ u8 reserved_at_260[0x20];
+
+ u8 packet_seq_err[0x20];
+
+ u8 reserved_at_2a0[0x20];
+
+ u8 implied_nak_seq_err[0x20];
+
+ u8 reserved_at_2e0[0x20];
+
+ u8 local_ack_timeout_err[0x20];
+
+ u8 reserved_at_320[0x4e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -5004,6 +5136,28 @@ struct mlx5_ifc_detach_from_mcg_in_bits {
u8 multicast_gid[16][0x8];
};
+struct mlx5_ifc_destroy_xrq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_xrq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 xrqn[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_destroy_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5589,6 +5743,30 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_create_xrq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 xrqn[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_xrq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_xrqc_bits xrq_context;
+};
+
struct mlx5_ifc_create_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6130,6 +6308,29 @@ struct mlx5_ifc_attach_to_mcg_in_bits {
u8 multicast_gid[16][0x8];
};
+struct mlx5_ifc_arm_xrq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_arm_xrq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 xrqn[0x18];
+
+ u8 reserved_at_60[0x10];
+ u8 lwm[0x10];
+};
+
struct mlx5_ifc_arm_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6167,7 +6368,8 @@ struct mlx5_ifc_arm_rq_out_bits {
};
enum {
- MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1,
+ MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2,
};
struct mlx5_ifc_arm_rq_in_bits {
@@ -6360,6 +6562,30 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};
+struct mlx5_ifc_set_rate_limit_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_rate_limit_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 rate_limit_index[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 rate_limit[0x20];
+};
+
struct mlx5_ifc_access_register_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6484,12 +6710,16 @@ struct mlx5_ifc_pude_reg_bits {
};
struct mlx5_ifc_ptys_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x1];
+ u8 an_disable_admin[0x1];
+ u8 an_disable_cap[0x1];
+ u8 reserved_at_3[0x5];
u8 local_port[0x8];
u8 reserved_at_10[0xd];
u8 proto_mask[0x3];
- u8 reserved_at_20[0x40];
+ u8 an_status[0x4];
+ u8 reserved_at_24[0x3c];
u8 eth_proto_capability[0x20];
@@ -7450,4 +7680,34 @@ struct mlx5_ifc_mcia_reg_bits {
u8 dword_11[0x20];
};
+struct mlx5_ifc_dcbx_param_bits {
+ u8 dcbx_cee_cap[0x1];
+ u8 dcbx_ieee_cap[0x1];
+ u8 dcbx_standby_cap[0x1];
+ u8 reserved_at_3[0x5];
+ u8 port_number[0x8];
+ u8 reserved_at_10[0xa];
+ u8 max_application_table_size[0x6];
+ u8 reserved_at_20[0x15];
+ u8 version_oper[0x3];
+ u8 reserved_at_38[0x5];
+ u8 version_admin[0x3];
+ u8 willing_admin[0x1];
+ u8 reserved_at_41[0x3];
+ u8 pfc_cap_oper[0x4];
+ u8 reserved_at_48[0x4];
+ u8 pfc_cap_admin[0x4];
+ u8 reserved_at_50[0x4];
+ u8 num_of_tc_oper[0x4];
+ u8 reserved_at_58[0x4];
+ u8 num_of_tc_admin[0x4];
+ u8 remote_willing[0x1];
+ u8 reserved_at_61[0x3];
+ u8 remote_pfc_cap[0x4];
+ u8 reserved_at_68[0x14];
+ u8 remote_num_of_tc[0x4];
+ u8 reserved_at_80[0x18];
+ u8 error[0x8];
+ u8 reserved_at_a0[0x160];
+};
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 9851862c0..e3012cc64 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -47,6 +47,14 @@ enum mlx5_module_id {
MLX5_MODULE_ID_QSFP28 = 0x11,
};
+enum mlx5_an_status {
+ MLX5_AN_UNAVAILABLE = 0,
+ MLX5_AN_COMPLETE = 1,
+ MLX5_AN_FAILED = 2,
+ MLX5_AN_LINK_UP = 3,
+ MLX5_AN_LINK_DOWN = 4,
+};
+
#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
@@ -65,13 +73,17 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, int proto_mask,
u8 local_port);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask);
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index f43ed054a..33c97dc90 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -35,6 +35,31 @@
#include <linux/mlx5/driver.h>
+enum {
+ MLX5_SRQ_FLAG_ERR = (1 << 0),
+ MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+};
+
+struct mlx5_srq_attr {
+ u32 type;
+ u32 flags;
+ u32 log_size;
+ u32 wqe_shift;
+ u32 log_page_size;
+ u32 wqe_cnt;
+ u32 srqn;
+ u32 xrcd;
+ u32 page_offset;
+ u32 cqn;
+ u32 pd;
+ u32 lwm;
+ u32 user_index;
+ u64 db_record;
+ u64 *pas;
+};
+
+struct mlx5_core_dev;
+
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 6c16c198f..e087b7d04 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,6 +43,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
+void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index eb8609559..a7724810d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -309,10 +309,34 @@ struct vm_fault {
* VM_FAULT_DAX_LOCKED and fill in
* entry here.
*/
- /* for ->map_pages() only */
- pgoff_t max_pgoff; /* map pages for offset from pgoff till
- * max_pgoff inclusive */
- pte_t *pte; /* pte entry associated with ->pgoff */
+};
+
+/*
+ * Page fault context: passed through the page fault handler instead of an
+ * endless list of function arguments.
+ */
+struct fault_env {
+ struct vm_area_struct *vma; /* Target VMA */
+ unsigned long address; /* Faulting virtual address */
+ unsigned int flags; /* FAULT_FLAG_xxx flags */
+ pmd_t *pmd; /* Pointer to pmd entry matching
+ * the 'address'
+ */
+ pte_t *pte; /* Pointer to pte entry matching
+ * the 'address'. NULL if the page
+ * table hasn't been allocated.
+ */
+ spinlock_t *ptl; /* Page table lock.
+ * Protects pte page table if 'pte'
+ * is not NULL, otherwise pmd.
+ */
+ pgtable_t prealloc_pte; /* Pre-allocated pte page table.
+ * vm_ops->map_pages() calls
+ * alloc_set_pte() from atomic context.
+ * do_fault_around() pre-allocates
+ * page table to avoid allocation from
+ * atomic context.
+ */
};
/*
@@ -327,7 +351,8 @@ struct vm_operations_struct {
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
pmd_t *, unsigned int flags);
- void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ void (*map_pages)(struct fault_env *fe,
+ pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
@@ -537,7 +562,6 @@ void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
@@ -601,8 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon);
+int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+ struct page *page);
#endif
/*
@@ -909,6 +933,11 @@ static inline struct zone *page_zone(const struct page *page)
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
+static inline pg_data_t *page_pgdat(const struct page *page)
+{
+ return NODE_DATA(page_to_nid(page));
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
@@ -949,11 +978,21 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
{
return page->mem_cgroup;
}
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return READ_ONCE(page->mem_cgroup);
+}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
return NULL;
}
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+}
#endif
/*
@@ -1035,6 +1074,7 @@ static inline pgoff_t page_file_index(struct page *page)
}
bool page_mapped(struct page *page);
+struct address_space *page_mapping(struct page *page);
/*
* Return true only if the page has been allocated with
@@ -1215,15 +1255,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
#else
-static inline int handle_mm_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
{
/* should never happen if there's no MMU */
BUG();
@@ -2086,7 +2125,8 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern void filemap_map_pages(struct fault_env *fe,
+ pgoff_t start_pgoff, pgoff_t end_pgoff);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
/* mm/page-writeback.c */
@@ -2282,13 +2322,14 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
+extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
+
#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
#endif
-void drop_pagecache(void);
void drop_slab(void);
void drop_slab_node(int nid);
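The handle_mm_fault() conversion drops the mm argument because it is always vma->vm_mm; arch fault handlers update mechanically, roughly as in this sketch (not any specific architecture's code):

#include <linux/mm.h>

/* Sketch of a converted arch call site: the mm is implicit in the vma. */
static int fault_one(struct vm_area_struct *vma, unsigned long address,
		     unsigned int flags)
{
	/* previously: handle_mm_fault(vma->vm_mm, vma, address, flags) */
	return handle_mm_fault(vma, address, flags);
}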
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 5bd29ba4f..71613e8a7 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -23,25 +23,30 @@ static inline int page_is_file_cache(struct page *page)
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
- enum lru_list lru, int nr_pages)
+ enum lru_list lru, enum zone_type zid,
+ int nr_pages)
{
- __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+ __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
+ __mod_zone_page_state(&pgdat->node_zones[zid],
+ NR_ZONE_LRU_BASE + lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
- enum lru_list lru, int nr_pages)
+ enum lru_list lru, enum zone_type zid,
+ int nr_pages)
{
+ __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
-#else
- __update_lru_size(lruvec, lru, nr_pages);
#endif
}
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
@@ -49,7 +54,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
list_del(&page->lru);
- update_lru_size(lruvec, lru, -hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
/**
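Since LRU accounting is now node-based, with per-zone counters retained for watermark decisions, update_lru_size() callers pass the page's zone id as well; the helpers above show the pattern, and an open-coded caller converts the same way (sketch):

#include <linux/huge_mm.h>
#include <linux/mm_inline.h>

/* Sketch: one call updates the node-level NR_LRU_BASE counter and the
 * matching per-zone NR_ZONE_LRU_BASE counter. */
static void account_activation(struct lruvec *lruvec, struct page *page)
{
	update_lru_size(lruvec, LRU_ACTIVE_ANON, page_zonenum(page),
			hpage_nr_pages(page));
}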
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 10bc49181..bf28080fa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -60,51 +60,52 @@ struct page {
};
/* Second double word */
- struct {
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* sl[aou]b first free object */
- /* page_deferred_list().prev -- second tail page */
- };
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+ /* page_deferred_list().prev -- second tail page */
+ };
- union {
+ union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
- /* Used for cmpxchg_double in slub */
- unsigned long counters;
+ /* Used for cmpxchg_double in slub */
+ unsigned long counters;
#else
- /*
- * Keep _refcount separate from slub cmpxchg_double
- * data. As the rest of the double word is protected by
- * slab_lock but _refcount is not.
- */
- unsigned counters;
+ /*
+ * Keep _refcount separate from slub cmpxchg_double data.
+ * As the rest of the double word is protected by slab_lock
+ * but _refcount is not.
+ */
+ unsigned counters;
#endif
- struct {
-
- union {
- /*
- * Count of ptes mapped in mms, to show
- * when page is mapped & limit reverse
- * map searches.
- */
- atomic_t _mapcount;
-
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
- };
+ struct {
+ union {
/*
- * Usage count, *USE WRAPPER FUNCTION*
- * when manual accounting. See page_ref.h
+ * Count of ptes mapped in mms, to show when
+ * page is mapped & limit reverse map searches.
+ *
+ * Extra information about page type may be
+ * stored here for pages that are never mapped,
+ * in which case the value MUST BE <= -2.
+ * See page-flags.h for more details.
*/
- atomic_t _refcount;
+ atomic_t _mapcount;
+
+ unsigned int active; /* SLAB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ int units; /* SLOB */
};
- unsigned int active; /* SLAB */
+ /*
+ * Usage count, *USE WRAPPER FUNCTION* when manual
+ * accounting. See page_ref.h
+ */
+ atomic_t _refcount;
};
};
@@ -117,7 +118,7 @@ struct page {
*/
union {
struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone->lru_lock !
+ * protected by zone_lru_lock !
* Can be used as a generic list
* by the page owner.
*/
@@ -359,6 +360,9 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef CONFIG_UKSM
+ struct vma_slot *uksm_vma_slot;
+#endif
};
struct core_thread {
@@ -596,6 +600,9 @@ struct vm_special_mapping {
int (*fault)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma,
struct vm_fault *vmf);
+
+ int (*mremap)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma);
};
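
A sketch of the new ->mremap hook, modelled on what an architecture vDSO mapping might do; mm->context.vdso is an x86-style field and is assumed here purely for illustration:

static int example_vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
{
	/* keep the cached user address in sync after mremap() */
	current->mm->context.vdso = (void __user *)new_vma->vm_start;
	return 0;
}
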
enum tlb_flush_reason {
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 33e17f6a3..634c4c51f 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -49,7 +49,7 @@ static inline void vm_unacct_memory(long pages)
*
* Returns true if the prot flags are valid
*/
-static inline int arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot)
{
return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index eb0151bac..d8673ca96 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -95,6 +95,7 @@ struct mmc_ext_csd {
u8 raw_partition_support; /* 160 */
u8 raw_rpmb_size_mult; /* 168 */
u8 raw_erased_mem_count; /* 181 */
+ u8 strobe_support; /* 184 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
u8 raw_driver_strength; /* 197 */
@@ -279,6 +280,7 @@ struct mmc_card {
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
unsigned int erase_size; /* erase size in sectors */
@@ -353,6 +355,9 @@ struct mmc_fixup {
/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
u16 cis_vendor, cis_device;
+ /* for MMC cards */
+ unsigned int ext_csd_rev;
+
void (*vendor_fixup)(struct mmc_card *card, int data);
int data;
};
@@ -361,11 +366,20 @@ struct mmc_fixup {
#define CID_OEMID_ANY ((unsigned short) -1)
#define CID_NAME_ANY (NULL)
+#define EXT_CSD_REV_ANY (-1u)
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_HYNIX 0x90
+
#define END_FIXUP { NULL }
#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
_cis_vendor, _cis_device, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
{ \
.name = (_name), \
.manfid = (_manfid), \
@@ -376,23 +390,30 @@ struct mmc_fixup {
.cis_device = (_cis_device), \
.vendor_fixup = (_fixup), \
.data = (_data), \
+ .ext_csd_rev = (_ext_csd_rev), \
}
#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
_FIXUP_EXT(_name, _manfid, \
_oemid, _rev_start, _rev_end, \
SDIO_ANY_ID, SDIO_ANY_ID, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
- MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
+ _ext_csd_rev) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ _ext_csd_rev)
#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
CID_OEMID_ANY, 0, -1ull, \
_vendor, _device, \
- _fixup, _data) \
+ _fixup, _data, EXT_CSD_REV_ANY) \
#define cid_rev(hwrev, fwrev, year, month) \
(((u64) hwrev) << 40 | \
@@ -511,6 +532,11 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
}
+static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
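
An illustrative fixup-table entry built from the new pieces above (add_quirk_mmc mirrors the helper used by the mmc block driver and is an assumption here): disable HPI only on Hynix parts reporting EXT_CSD revision 5.

static const struct mmc_fixup example_fixups[] = {
	MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY,
			      add_quirk_mmc, MMC_QUIRK_BROKEN_HPI, 5),
	END_FIXUP
};
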
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index f7ed271a1..83b0edfce 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -112,7 +112,6 @@ struct dw_mci_dma_slave {
* @part_buf: Simple buffer for partial fifo reads/writes.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
- * @quirks: Set of quirks that apply to specific versions of the IP.
* @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
@@ -218,9 +217,6 @@ struct dw_mci {
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
- /* Workaround flags */
- u32 quirks;
-
bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
@@ -242,17 +238,12 @@ struct dw_mci_dma_ops {
void (*exit)(struct dw_mci *host);
};
-/* IP Quirks/flags. */
-/* Timer for broken data transfer over scheme */
-#define DW_MCI_QUIRK_BROKEN_DTO BIT(0)
-
struct dma_pdata;
/* Board platform data */
struct dw_mci_board {
u32 num_slots;
- u32 quirks; /* Workaround / Quirk flags */
unsigned int bus_hz; /* Clock speed at the cclk_in pad */
u32 caps; /* Capabilities */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 45cde8cd3..aa4bfbf12 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -19,6 +19,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
struct mmc_ios {
@@ -77,6 +78,8 @@ struct mmc_ios {
#define MMC_SET_DRIVER_TYPE_A 1
#define MMC_SET_DRIVER_TYPE_C 2
#define MMC_SET_DRIVER_TYPE_D 3
+
+ bool enhanced_strobe; /* hs400es selection */
};
struct mmc_host_ops {
@@ -143,6 +146,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Prepare enhanced strobe, depending on the host driver */
+ void (*hs400_enhanced_strobe)(struct mmc_host *host,
+ struct mmc_ios *ios);
int (*select_drive_strength)(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
@@ -302,6 +308,9 @@ struct mmc_host {
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */
+#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */
+#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */
+#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -513,6 +522,11 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+static inline bool mmc_card_hs400es(struct mmc_card *card)
+{
+ return card->host->ios.enhanced_strobe;
+}
+
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 15f2c4a0a..c376209c7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -297,6 +297,7 @@ struct _mmc_csd {
#define EXT_CSD_PART_CONFIG 179 /* R/W */
#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
#define EXT_CSD_BUS_WIDTH 183 /* R/W */
+#define EXT_CSD_STROBE_SUPPORT 184 /* RO */
#define EXT_CSD_HS_TIMING 185 /* R/W */
#define EXT_CSD_POWER_CLASS 187 /* R/W */
#define EXT_CSD_REV 192 /* RO */
@@ -380,12 +381,14 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */
#define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \
EXT_CSD_CARD_TYPE_HS400_1_2V)
+#define EXT_CSD_CARD_TYPE_HS400ES (1<<8) /* Card can run at HS400ES */
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */
#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */
+#define EXT_CSD_BUS_WIDTH_STROBE BIT(7) /* Enhanced strobe mode */
#define EXT_CSD_TIMING_BC 0 /* Backwards compatibility */
#define EXT_CSD_TIMING_HS 1 /* High speed */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index de7be78c6..451a811f4 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -39,6 +39,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON(cond) WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
@@ -47,6 +48,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
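
Usage sketch: like the other VM_WARN* variants, the new VM_WARN() degrades to a side-effect-free BUILD_BUG_ON_INVALID() when CONFIG_DEBUG_VM is off, so it can sit on hot paths; the check below is illustrative only.

static inline void example_check_page(struct page *page)
{
	VM_WARN(page_ref_count(page) <= 0,
		"page %p has a non-positive refcount\n", page);
}
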
#ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 02069c234..89f7dd862 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
@@ -93,7 +95,7 @@ struct free_area {
struct pglist_data;
/*
- * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
+ * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
* So add a wild amount of padding here to ensure that they fall into separate
* cachelines. There are very few zone structures in the machine, so space
* consumption is not a concern here.
@@ -110,36 +112,23 @@ struct zone_padding {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
- NR_ALLOC_BATCH,
- NR_LRU_BASE,
- NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
- NR_ACTIVE_ANON, /* " " " " " */
- NR_INACTIVE_FILE, /* " " " " " */
- NR_ACTIVE_FILE, /* " " " " " */
- NR_UNEVICTABLE, /* " " " " " */
+ NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
+ NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
+ NR_ZONE_ACTIVE_ANON,
+ NR_ZONE_INACTIVE_FILE,
+ NR_ZONE_ACTIVE_FILE,
+ NR_ZONE_UNEVICTABLE,
+ NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_ANON_PAGES, /* Mapped anonymous pages */
- NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
- only modified from process context */
- NR_FILE_PAGES,
- NR_FILE_DIRTY,
- NR_WRITEBACK,
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE, /* used for pagetables */
- NR_KERNEL_STACK,
+ NR_KERNEL_STACK_KB, /* measured in KiB */
/* Second 128 byte cacheline */
- NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_BOUNCE,
- NR_VMSCAN_WRITE,
- NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
- NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
- NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
- NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
- NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
- NR_DIRTIED, /* page dirtyings since bootup */
- NR_WRITTEN, /* page writings since bootup */
- NR_PAGES_SCANNED, /* pages scanned since last reclaim */
+#if IS_ENABLED(CONFIG_ZSMALLOC)
+ NR_ZSPAGES, /* allocated in zsmalloc */
+#endif
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
NUMA_MISS, /* allocated in non intended node */
@@ -148,12 +137,43 @@ enum zone_stat_item {
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
#endif
+ NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UKSM
+ NR_UKSM_ZERO_PAGES,
+#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
+enum node_stat_item {
+ NR_LRU_BASE,
+ NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
+ NR_ACTIVE_ANON, /* " " " " " */
+ NR_INACTIVE_FILE, /* " " " " " */
+ NR_ACTIVE_FILE, /* " " " " " */
+ NR_UNEVICTABLE, /* " " " " " */
+ NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
+ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
+ NR_PAGES_SCANNED, /* pages scanned since last reclaim */
WORKINGSET_REFAULT,
WORKINGSET_ACTIVATE,
WORKINGSET_NODERECLAIM,
- NR_ANON_TRANSPARENT_HUGEPAGES,
- NR_FREE_CMA_PAGES,
- NR_VM_ZONE_STAT_ITEMS };
+ NR_ANON_MAPPED, /* Mapped anonymous pages */
+ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
+ only modified from process context */
+ NR_FILE_PAGES,
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
+ NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
+ NR_SHMEM_THPS,
+ NR_SHMEM_PMDMAPPED,
+ NR_ANON_THPS,
+ NR_UNSTABLE_NFS, /* NFS unstable pages */
+ NR_VMSCAN_WRITE,
+ NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
+ NR_DIRTIED, /* page dirtyings since bootup */
+ NR_WRITTEN, /* page writings since bootup */
+ NR_VM_NODE_STAT_ITEMS
+};
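
After this split, LRU totals are accounted per node, with the NR_ZONE_* items kept only as a zone-level shadow for compaction and reclaim-retry decisions; a hedged reader sketch, assuming the node_page_state() accessor that vmstat grows in the same series:

static unsigned long example_node_file_lru(struct pglist_data *pgdat)
{
	return node_page_state(pgdat, NR_ACTIVE_FILE) +
	       node_page_state(pgdat, NR_INACTIVE_FILE);
}
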
/*
* We do arithmetic on the LRU lists in various places in the code,
@@ -210,7 +230,7 @@ struct lruvec {
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
- struct zone *zone;
+ struct pglist_data *pgdat;
#endif
};
@@ -262,6 +282,11 @@ struct per_cpu_pageset {
#endif
};
+struct per_cpu_nodestat {
+ s8 stat_threshold;
+ s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+};
+
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
@@ -343,22 +368,9 @@ struct zone {
#ifdef CONFIG_NUMA
int node;
#endif
-
- /*
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
- * this zone's LRU. Maintained by the pageout code.
- */
- unsigned int inactive_ratio;
-
struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;
- /*
- * This is a per-zone reserve of pages that are not available
- * to userspace allocations.
- */
- unsigned long totalreserve_pages;
-
#ifndef CONFIG_SPARSEMEM
/*
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
@@ -367,14 +379,6 @@ struct zone {
unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NUMA
- /*
- * zone reclaim becomes active if more unmapped pages exist.
- */
- unsigned long min_unmapped_pages;
- unsigned long min_slab_pages;
-#endif /* CONFIG_NUMA */
-
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
@@ -467,24 +471,21 @@ struct zone {
unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
+ /* Write-intensive fields used from the page allocator */
ZONE_PADDING(_pad1_)
+
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
/* zone flags, see below */
unsigned long flags;
- /* Write-intensive fields used from the page allocator */
+ /* Primarily protects free_area */
spinlock_t lock;
+ /* Write-intensive fields used by compaction and vmstats. */
ZONE_PADDING(_pad2_)
- /* Write-intensive fields used by page reclaim */
-
- /* Fields commonly accessed by the page reclaim scanner */
- spinlock_t lru_lock;
- struct lruvec lruvec;
-
/*
* When free pages are below this point, additional steps are taken
* when reading the number of free pages to avoid per-cpu counter
@@ -522,20 +523,18 @@ struct zone {
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
-enum zone_flags {
- ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
- ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
- ZONE_CONGESTED, /* zone has many dirty pages backed by
+enum pgdat_flags {
+ PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
* a congested BDI
*/
- ZONE_DIRTY, /* reclaim scanning has recently found
+ PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
*/
- ZONE_WRITEBACK, /* reclaim scanning has recently found
+ PGDAT_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback
*/
- ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
+ PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
static inline unsigned long zone_end_pfn(const struct zone *zone)
@@ -659,8 +658,9 @@ typedef struct pglist_data {
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
- int kswapd_max_order;
- enum zone_type classzone_idx;
+ int kswapd_order;
+ enum zone_type kswapd_classzone_idx;
+
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
@@ -677,6 +677,23 @@ typedef struct pglist_data {
/* Number of pages migrated during the rate limiting time interval */
unsigned long numabalancing_migrate_nr_pages;
#endif
+ /*
+ * This is a per-node reserve of pages that are not available
+ * to userspace allocations.
+ */
+ unsigned long totalreserve_pages;
+
+#ifdef CONFIG_NUMA
+ /*
+ * zone reclaim becomes active if more unmapped pages exist.
+ */
+ unsigned long min_unmapped_pages;
+ unsigned long min_slab_pages;
+#endif /* CONFIG_NUMA */
+
+ /* Write-intensive fields used by page reclaim */
+ ZONE_PADDING(_pad1_)
+ spinlock_t lru_lock;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -691,6 +708,23 @@ typedef struct pglist_data {
struct list_head split_queue;
unsigned long split_queue_len;
#endif
+
+ /* Fields commonly accessed by the page reclaim scanner */
+ struct lruvec lruvec;
+
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+ * this node's LRU. Maintained by the pageout code.
+ */
+ unsigned int inactive_ratio;
+
+ unsigned long flags;
+
+ ZONE_PADDING(_pad2_)
+
+ /* Per-node vmstats */
+ struct per_cpu_nodestat __percpu *per_cpu_nodestats;
+ atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -704,6 +738,15 @@ typedef struct pglist_data {
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
+static inline spinlock_t *zone_lru_lock(struct zone *zone)
+{
+ return &zone->zone_pgdat->lru_lock;
+}
+
+static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
+{
+ return &pgdat->lruvec;
+}
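
Sketch: with lru_lock now living in pg_data_t, zone-oriented callers reach it through zone_lru_lock(); a purely illustrative locking skeleton:

static void example_walk_lru(struct zone *zone)
{
	struct lruvec *lruvec = node_lruvec(zone->zone_pgdat);

	spin_lock_irq(zone_lru_lock(zone));
	/* ... walk lruvec->lists[...] under the per-node lock ... */
	spin_unlock_irq(zone_lru_lock(zone));
}
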
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
@@ -756,12 +799,12 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
extern void lruvec_init(struct lruvec *lruvec);
-static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
- return lruvec->zone;
+ return lruvec->pgdat;
#else
- return container_of(lruvec, struct zone, lruvec);
+ return container_of(lruvec, struct pglist_data, lruvec);
#endif
}
@@ -788,9 +831,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+ return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
{
- return (!!zone->present_pages);
+ return zone->present_pages;
}
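
Per the comment above, reclaim-side code should iterate with managed_zone(); a minimal sketch of the intended pattern, returning true if the node has any zone with buddy-managed pages:

static bool example_node_has_managed_zone(struct pglist_data *pgdat)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		if (managed_zone(&pgdat->node_zones[zid]))
			return true;
	return false;
}
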
extern int movable_zone;
@@ -817,7 +872,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 6e4c645e1..ed84c07f6 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -657,4 +657,20 @@ struct ulpi_device_id {
kernel_ulong_t driver_data;
};
+/**
+ * struct fsl_mc_device_id - MC object device identifier
+ * @vendor: vendor ID
+ * @obj_type: MC object type
+ * @ver_major: MC object version major number
+ * @ver_minor: MC object version minor number
+ *
+ * Type of entries in the "device ID" table for MC object devices supported by
+ * an MC object device driver. The last entry of the table has vendor set to 0x0.
+ */
+struct fsl_mc_device_id {
+ __u16 vendor;
+ const char obj_type[16];
+};
+
+
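
An illustrative match table; FSL_MC_VENDOR_FREESCALE and the "dpni" object type come from the fsl-mc bus code and are assumptions here:

static const struct fsl_mc_device_id example_fsl_mc_ids[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
	{ .vendor = 0x0 }	/* terminator, per the kernel-doc above */
};
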
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 3daf2b3a0..0c3207d26 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -18,6 +18,7 @@
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
+#include <linux/extable.h> /* only as arch move module.h -> extable.h */
#include <linux/rbtree_latch.h>
#include <linux/percpu.h>
@@ -37,6 +38,7 @@ struct modversion_info {
};
struct module;
+struct exception_table_entry;
struct module_kobject {
struct kobject kobj;
@@ -155,18 +157,6 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Archs provide a method of finding the correct exception table. */
-struct exception_table_entry;
-
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long value);
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish);
-void sort_main_extable(void);
-void trim_init_extable(struct module *m);
-
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
@@ -268,9 +258,6 @@ extern const typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-/* Given an address, look for it in the exception tables */
-const struct exception_table_entry *search_exception_tables(unsigned long add);
-
struct notifier_block;
#ifdef CONFIG_MODULES
@@ -311,6 +298,8 @@ struct module_layout {
unsigned int text_size;
/* Size of RO section of the module (text+rodata) */
unsigned int ro_size;
+ /* Size of RO after init section */
+ unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -575,8 +564,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
-extern void __module_put_and_exit(struct module *mod, long code)
- __attribute__((noreturn));
+extern void __noreturn __module_put_and_exit(struct module *mod,
+ long code);
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
@@ -630,9 +619,6 @@ const char *module_address_lookup(unsigned long addr,
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-/* For extable.c to search modules' exception tables. */
-const struct exception_table_entry *search_module_extables(unsigned long addr);
-
int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);
@@ -657,13 +643,6 @@ static inline bool is_livepatch_module(struct module *mod)
#else /* !CONFIG_MODULES... */
-/* Given an address, look for it in the exception tables. */
-static inline const struct exception_table_entry *
-search_module_extables(unsigned long addr)
-{
- return NULL;
-}
-
static inline struct module *__module_address(unsigned long addr)
{
return NULL;
@@ -788,12 +767,12 @@ extern int module_sysfs_initialized;
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
extern void set_all_modules_text_rw(void);
extern void set_all_modules_text_ro(void);
-extern void module_enable_ro(const struct module *mod);
+extern void module_enable_ro(const struct module *mod, bool after_init);
extern void module_disable_ro(const struct module *mod);
#else
static inline void set_all_modules_text_rw(void) { }
static inline void set_all_modules_text_ro(void) { }
-static inline void module_enable_ro(const struct module *mod) { }
+static inline void module_enable_ro(const struct module *mod, bool after_init) { }
static inline void module_disable_ro(const struct module *mod) { }
#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index f822c3c11..54a594d49 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,6 +81,7 @@ extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(struct path *path);
extern int __mnt_is_readonly(struct vfsmount *mnt);
+extern bool mnt_may_suid(struct vfsmount *mnt);
struct path;
extern struct vfsmount *clone_private_mount(struct path *path);
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 3a5abe95a..1cc5ffb76 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
-int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
-int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes,
+int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
#define log_mpidump g10_log_mpidump
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index bf9b322cb..e5fb81376 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -104,6 +104,7 @@ struct mfc_cache {
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
+ unsigned long lastuse;
unsigned char ttls[MAXVIFS]; /* TTL thresholds */
} res;
} mfc_un;
@@ -119,5 +120,5 @@ struct mfc_cache {
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 66982e764..19a1c0c29 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -92,6 +92,7 @@ struct mfc6_cache {
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
+ unsigned long lastuse;
unsigned char ttls[MAXMIFS]; /* TTL thresholds */
} res;
} mfc_un;
@@ -115,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index ec39a086f..e8c81fbd5 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -47,6 +47,7 @@ struct fsl_mc_msi_desc {
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
+ * @affinity: Optional pointer to a cpu affinity mask for this descriptor
*
* @masked: [PCI MSI/X] Mask bits
* @is_msix: [PCI MSI/X] True if MSI-X
@@ -67,6 +68,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
+ const struct cpumask *affinity;
union {
/* PCI MSI/X specific data */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index fbe8e164a..8dd6e01f4 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -783,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
* NAND Flash Manufacturer ID Codes
*/
#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_ESMT 0xc8
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_FUJITSU 0x04
#define NAND_MFR_NATIONAL 0x8f
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 7f041bd88..c425c7b4c 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -173,10 +173,10 @@ struct spi_nor {
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*read)(struct spi_nor *nor, loff_t from,
- size_t len, size_t *retlen, u_char *read_buf);
- void (*write)(struct spi_nor *nor, loff_t to,
- size_t len, size_t *retlen, const u_char *write_buf);
+ ssize_t (*read)(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *read_buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d3d0398f2..f29abda31 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -81,8 +81,6 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
-struct qstr;
-extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index aee2761d2..f1ea426d6 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -26,6 +26,7 @@ struct nd_device_driver {
unsigned long type;
int (*probe)(struct device *dev);
int (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};
@@ -67,7 +68,7 @@ struct nd_namespace_io {
struct nd_namespace_common common;
struct resource res;
resource_size_t size;
- void __pmem *addr;
+ void *addr;
struct badblocks bb;
};
diff --git a/include/linux/net.h b/include/linux/net.h
index 25aa03b51..b9f0ff4d4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -185,6 +185,7 @@ struct proto_ops {
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
+ int (*peek_len)(struct socket *sock);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index aa7b2400f..9c6c8ef2e 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
* headers in software.
*/
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
+ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
+ NETIF_F_GSO_SCTP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -128,6 +129,7 @@ enum {
#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
+#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -166,7 +168,8 @@ enum {
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
+ NETIF_F_GSO_SCTP)
/*
* If one device supports one of these features, then enable them
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4f0e6fb39..e8d79d4eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -61,6 +61,9 @@ struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
+/* UDP Tunnel offloads */
+struct udp_tunnel_info;
+struct bpf_prog;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -90,7 +93,6 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
-#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
@@ -785,6 +787,7 @@ enum {
TC_SETUP_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
+ TC_SETUP_MATCHALL,
};
struct tc_cls_u32_offload;
@@ -795,9 +798,37 @@ struct tc_to_netdev {
u8 tc;
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
+ struct tc_cls_matchall_offload *cls_mall;
};
};
+/* These structures hold the attributes of xdp state that are being passed
+ * to the netdevice through the xdp op.
+ */
+enum xdp_netdev_command {
+ /* Set or clear a bpf program used in the earliest stages of packet
+ * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
+ * is responsible for calling bpf_prog_put on any old progs that are
+ * stored. In case of error, the callee need not release the new prog
+ * reference, but on success it takes ownership and must bpf_prog_put
+ * when it is no longer used.
+ */
+ XDP_SETUP_PROG,
+ /* Check if a bpf program is set on the device. The callee should
+ * return true if a program is currently attached and running.
+ */
+ XDP_QUERY_PROG,
+};
+
+struct netdev_xdp {
+ enum xdp_netdev_command command;
+ union {
+ /* XDP_SETUP_PROG */
+ struct bpf_prog *prog;
+ /* XDP_QUERY_PROG */
+ bool prog_attached;
+ };
+};
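
A hedged driver-side sketch of the ndo_xdp() contract described above; the priv layout and the setup helper are hypothetical:

static int example_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return example_setup_prog(priv, xdp->prog); /* takes ownership */
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}
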
/*
* This structure defines the management hooks for network devices.
@@ -1025,31 +1056,18 @@ struct tc_to_netdev {
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
*
- * void (*ndo_add_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
- * Called by vxlan to notify a driver about the UDP port and socket
- * address family that vxlan is listening to. It is called only when
- * a new port starts listening. The operation is protected by the
- * vxlan_net->sock_lock.
- *
- * void (*ndo_add_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
- * Called by geneve to notify a driver about the UDP port and socket
- * address family that geneve is listnening to. It is called only when
- * a new port starts listening. The operation is protected by the
- * geneve_net->sock_lock.
- *
- * void (*ndo_del_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
- * Called by geneve to notify the driver about a UDP port and socket
- * address family that geneve is not listening to anymore. The operation
- * is protected by the geneve_net->sock_lock.
- *
- * void (*ndo_del_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
- * Called by vxlan to notify the driver about a UDP port and socket
- * address family that vxlan is not listening to anymore. The operation
- * is protected by the vxlan_net->sock_lock.
+ * void (*ndo_udp_tunnel_add)(struct net_device *dev,
+ * struct udp_tunnel_info *ti);
+ * Called by UDP tunnel to notify a driver about the UDP port and socket
+ * address family that a UDP tunnel is listening to. It is called only
+ * when a new port starts listening. The operation is protected by the
+ * RTNL.
+ *
+ * void (*ndo_udp_tunnel_del)(struct net_device *dev,
+ * struct udp_tunnel_info *ti);
+ * Called by UDP tunnel to notify the driver about a UDP port and socket
+ * address family that the UDP tunnel is not listening to anymore. The
+ * operation is protected by the RTNL.
*
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
@@ -1099,6 +1117,9 @@ struct tc_to_netdev {
* appropriate rx headroom value allows avoiding skb head copy on
* forward. Setting a negative value resets the rx headroom to the
* default value.
+ * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
+ * This function is used to set or query state related to XDP on the
+ * netdevice. See definition of enum xdp_netdev_command for details.
*
*/
struct net_device_ops {
@@ -1221,8 +1242,10 @@ struct net_device_ops {
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
netdev_features_t features);
- int (*ndo_neigh_construct)(struct neighbour *n);
- void (*ndo_neigh_destroy)(struct neighbour *n);
+ int (*ndo_neigh_construct)(struct net_device *dev,
+ struct neighbour *n);
+ void (*ndo_neigh_destroy)(struct net_device *dev,
+ struct neighbour *n);
int (*ndo_fdb_add)(struct ndmsg *ndm,
struct nlattr *tb[],
@@ -1258,18 +1281,10 @@ struct net_device_ops {
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_add_vxlan_port)(struct net_device *dev,
- sa_family_t sa_family,
- __be16 port);
- void (*ndo_del_vxlan_port)(struct net_device *dev,
- sa_family_t sa_family,
- __be16 port);
- void (*ndo_add_geneve_port)(struct net_device *dev,
- sa_family_t sa_family,
- __be16 port);
- void (*ndo_del_geneve_port)(struct net_device *dev,
- sa_family_t sa_family,
- __be16 port);
+ void (*ndo_udp_tunnel_add)(struct net_device *dev,
+ struct udp_tunnel_info *ti);
+ void (*ndo_udp_tunnel_del)(struct net_device *dev,
+ struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1289,6 +1304,8 @@ struct net_device_ops {
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
int needed_headroom);
+ int (*ndo_xdp)(struct net_device *dev,
+ struct netdev_xdp *xdp);
};
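
A sketch of a driver converting from the vxlan/geneve pairs to the merged hook; the UDP_TUNNEL_TYPE_* constants come from include/net/udp_tunnel.h in the same series, and the programming helpers are hypothetical:

static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		example_program_vxlan_port(dev, ti->port);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		example_program_geneve_port(dev, ti->port);
		break;
	default:
		break;
	}
}
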
/**
@@ -1457,6 +1474,8 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
+ * @ndisc_ops: Includes callbacks for different IPv6 neighbour
+ * discovery handling. Necessary for e.g. 6LoWPAN.
* @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
@@ -1484,8 +1503,7 @@ enum netdev_priv_flags {
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
- * @neigh_priv_len; Used in neigh_alloc(),
- * initialized only in atm/clip.c
+ * @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
* @dev_port: Used to differentiate devices that share
@@ -1594,7 +1612,8 @@ enum netdev_priv_flags {
* @phydev: Physical device may attach itself
* for hardware timestamping
*
- * @qdisc_tx_busylock: XXX: need comments on this one
+ * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
+ * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -1673,6 +1692,9 @@ struct net_device {
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ndisc_ops *ndisc_ops;
+#endif
const struct header_ops *header_ops;
@@ -1862,6 +1884,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ struct lock_class_key *qdisc_running_key;
bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1944,6 +1967,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
+#define netdev_lockdep_set_classes(dev) \
+{ \
+ static struct lock_class_key qdisc_tx_busylock_key; \
+ static struct lock_class_key qdisc_running_key; \
+ static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
+ unsigned int i; \
+ \
+ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+ (dev)->qdisc_running_key = &qdisc_running_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
+ for (i = 0; i < (dev)->num_tx_queues; i++) \
+ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+ &qdisc_xmit_lock_key); \
+}
+
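
Typical call site, as a sketch: a virtual device invokes the macro once from its ndo_init()/setup path so every instance shares the static lockdep keys declared inside it.

static int example_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);
	return 0;
}
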
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
@@ -2233,8 +2273,8 @@ struct netdev_lag_lower_state_info {
#define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
-#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
-#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
+#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
+#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2370,6 +2410,8 @@ void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
DECLARE_PER_CPU(int, xmit_recursion);
+#define XMIT_RECURSION_LIMIT 10
+
static inline int dev_recursion_level(void)
{
return this_cpu_read(xmit_recursion);
@@ -3251,6 +3293,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
+int dev_change_xdp_fd(struct net_device *dev, int fd);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3800,12 +3843,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
void *netdev_lower_get_next(struct net_device *dev,
struct list_head **iter);
+
#define netdev_for_each_lower_dev(dev, ldev, iter) \
for (iter = (dev)->adj_list.lower.next, \
ldev = netdev_lower_get_next(dev, &(iter)); \
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
+struct net_device *netdev_all_lower_get_next(struct net_device *dev,
+ struct list_head **iter);
+struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
+ for (iter = (dev)->all_adj_list.lower.next, \
+ ldev = netdev_all_lower_get_next(dev, &(iter)); \
+ ldev; \
+ ldev = netdev_all_lower_get_next(dev, &(iter)))
+
+#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
+ for (iter = (dev)->all_adj_list.lower.next, \
+ ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
+ ldev; \
+ ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
+
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3821,14 +3882,17 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
+int netdev_default_l2upper_neigh_construct(struct net_device *dev,
+ struct neighbour *n);
+void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
+ struct neighbour *n);
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(const struct net_device *dev));
+int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
@@ -4013,6 +4077,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 80ca889b1..664da0048 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -15,6 +15,6 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-extern int nfnl_acct_overquota(const struct sk_buff *skb,
- struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+ struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dc4f58a3c..2ad1a2b28 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -6,6 +6,10 @@
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
+/* Test a struct->invflags and a boolean for inequality */
+#define NF_INVF(ptr, flag, boolean) \
+ ((boolean) ^ !!((ptr)->invflags & (flag)))
+
/**
* struct xt_action_param - parameters for matches/targets
*
@@ -246,6 +250,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
+unsigned int *xt_alloc_entry_offsets(unsigned int size);
+bool xt_find_jump_offset(const unsigned int *offsets,
+ unsigned int target, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2ea517c7c..984b2112c 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -115,8 +115,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct ebt_table *table);
-/* Used in the kernel match() functions */
-#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS))
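
A sketch of the conversion these two hunks imply, using ip_tables rule fields purely for illustration: the per-module FWINV() idiom becomes the shared NF_INVF() test.

static bool example_src_mismatch(const struct iphdr *ip,
				 const struct ipt_ip *ipinfo)
{
	/* true if the (possibly inverted) source-address test fails */
	return NF_INVF(ipinfo, IPT_INV_SRCIP,
		       (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr);
}
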
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bfed6b367..c6564ada9 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -643,4 +643,15 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
};
+#define NFS4_OP_MAP_NUM_LONGS \
+ DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
+#define NFS4_OP_MAP_NUM_WORDS \
+ (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
+struct nfs4_op_map {
+ union {
+ unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
+ u32 words[NFS4_OP_MAP_NUM_WORDS];
+ } u;
+};
+
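
The map is simply a bitmap over NFSv4 operation numbers; a minimal illustrative accessor:

static inline void example_op_map_set(struct nfs4_op_map *map, unsigned int op)
{
	__set_bit(op, map->u.longs);
}
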
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index d71278c3c..810124b33 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -205,12 +205,12 @@ struct nfs_inode {
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
-#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
+#define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -351,7 +351,6 @@ extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *ino
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index c304a11b5..7cc0deee5 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1185,17 +1185,6 @@ struct pnfs_ds_commit_info {
struct pnfs_commit_bucket *buckets;
};
-#define NFS4_OP_MAP_NUM_LONGS \
- DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
-#define NFS4_OP_MAP_NUM_WORDS \
- (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
-struct nfs4_op_map {
- union {
- unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
- u32 words[NFS4_OP_MAP_NUM_WORDS];
- } u;
-};
-
struct nfs41_state_protection {
u32 how;
struct nfs4_op_map enforce;
@@ -1543,7 +1532,7 @@ struct nfs_rpc_ops {
struct nfs_fattr *, struct nfs4_label *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, struct qstr *,
+ int (*lookup) (struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *,
struct nfs4_label *);
int (*access) (struct inode *, struct nfs_access_entry *);
@@ -1551,18 +1540,18 @@ struct nfs_rpc_ops {
unsigned int);
int (*create) (struct inode *, struct dentry *,
struct iattr *, int);
- int (*remove) (struct inode *, struct qstr *);
+ int (*remove) (struct inode *, const struct qstr *);
void (*unlink_setup) (struct rpc_message *, struct inode *dir);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
int (*unlink_done) (struct rpc_task *, struct inode *);
void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
- int (*link) (struct inode *, struct inode *, struct qstr *);
+ int (*link) (struct inode *, struct inode *, const struct qstr *);
int (*symlink) (struct inode *, struct dentry *, struct page *,
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
- int (*rmdir) (struct inode *, struct qstr *);
+ int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct dentry *, struct rpc_cred *,
u64, struct page **, unsigned int, int);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
@@ -1596,9 +1585,8 @@ struct nfs_rpc_ops {
int (*have_delegation)(struct inode *, fmode_t);
int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
- struct nfs_client *
- (*init_client) (struct nfs_client *, const struct rpc_timeout *,
- const char *);
+ struct nfs_client *(*init_client) (struct nfs_client *,
+ const struct nfs_client_initdata *);
void (*free_client) (struct nfs_client *);
struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
new file mode 100644
index 000000000..bf240a3cb
--- /dev/null
+++ b/include/linux/nvme-rdma.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _LINUX_NVME_RDMA_H
+#define _LINUX_NVME_RDMA_H
+
+enum nvme_rdma_cm_fmt {
+ NVME_RDMA_CM_FMT_1_0 = 0x0,
+};
+
+enum nvme_rdma_cm_status {
+ NVME_RDMA_CM_INVALID_LEN = 0x01,
+ NVME_RDMA_CM_INVALID_RECFMT = 0x02,
+ NVME_RDMA_CM_INVALID_QID = 0x03,
+ NVME_RDMA_CM_INVALID_HSQSIZE = 0x04,
+ NVME_RDMA_CM_INVALID_HRQSIZE = 0x05,
+ NVME_RDMA_CM_NO_RSC = 0x06,
+ NVME_RDMA_CM_INVALID_IRD = 0x07,
+ NVME_RDMA_CM_INVALID_ORD = 0x08,
+};
+
+/**
+ * struct nvme_rdma_cm_req - rdma connect request
+ *
+ * @recfmt: format of the RDMA Private Data
+ * @qid: queue Identifier for the Admin or I/O Queue
+ * @hrqsize: host receive queue size to be created
+ * @hsqsize: host send queue size to be created
+ */
+struct nvme_rdma_cm_req {
+ __le16 recfmt;
+ __le16 qid;
+ __le16 hrqsize;
+ __le16 hsqsize;
+ u8 rsvd[24];
+};
+
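
An illustrative host-side fill of this connect-request blob (a sketch, not the driver's actual helper):

static void example_fill_cm_req(struct nvme_rdma_cm_req *req,
				u16 qid, u16 queue_size)
{
	memset(req, 0, sizeof(*req));	/* keeps rsvd[] zeroed */
	req->recfmt  = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	req->qid     = cpu_to_le16(qid);
	req->hrqsize = cpu_to_le16(queue_size);
	req->hsqsize = cpu_to_le16(queue_size);
}
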
+/**
+ * struct nvme_rdma_cm_rep - rdma connect reply
+ *
+ * @recfmt: format of the RDMA Private Data
+ * @crqsize: controller receive queue size
+ */
+struct nvme_rdma_cm_rep {
+ __le16 recfmt;
+ __le16 crqsize;
+ u8 rsvd[28];
+};
+
+/**
+ * struct nvme_rdma_cm_rej - rdma connect reject
+ *
+ * @recfmt: format of the RDMA Private Data
+ * @sts: error status for the associated connect request
+ */
+struct nvme_rdma_cm_rej {
+ __le16 recfmt;
+ __le16 sts;
+};
+
+#endif /* _LINUX_NVME_RDMA_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7d51b2904..7676557ce 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,6 +16,78 @@
#define _LINUX_NVME_H
#include <linux/types.h>
+#include <linux/uuid.h>
+
+/* NQN names in command fields are specified with one size */
+#define NVMF_NQN_FIELD_LEN 256
+
+/* However the max length of a qualified name is another size */
+#define NVMF_NQN_SIZE 223
+
+#define NVMF_TRSVCID_SIZE 32
+#define NVMF_TRADDR_SIZE 256
+#define NVMF_TSAS_SIZE 256
+
+#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
+
+#define NVME_RDMA_IP_PORT 4420
+
+enum nvme_subsys_type {
+ NVME_NQN_DISC = 1, /* Discovery type target subsystem */
+ NVME_NQN_NVME = 2, /* NVME type target subsystem */
+};
+
+/* Address Family codes for Discovery Log Page entry ADRFAM field */
+enum {
+ NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
+ NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */
+ NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
+ NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
+ NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
+};
+
+/* Transport Type codes for Discovery Log Page entry TRTYPE field */
+enum {
+ NVMF_TRTYPE_RDMA = 1, /* RDMA */
+ NVMF_TRTYPE_FC = 2, /* Fibre Channel */
+ NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
+ NVMF_TRTYPE_MAX,
+};
+
+/* Transport Requirements codes for Discovery Log Page entry TREQ field */
+enum {
+ NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
+ NVMF_TREQ_REQUIRED = 1, /* Required */
+ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+};
+
+/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
+ * RDMA_QPTYPE field
+ */
+enum {
+ NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
+ NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
+};
+
+/* RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
+ */
+enum {
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
+ NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
+ NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
+ NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
+ NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
+};
+
+/* RDMA Connection Management Service Type codes for Discovery Log Page
+ * entry TSAS RDMA_CMS field
+ */
+enum {
+ NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets-based endpoint addressing */
+};
+
+#define NVMF_AQ_DEPTH 32
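
NVMF_NQN_FIELD_LEN is the fixed on-the-wire field size, while NVMF_NQN_SIZE
is the longest legal name; a sketch of the validation pattern that falls out
of the two (the nqn variable is hypothetical):

	char field[NVMF_NQN_FIELD_LEN] = { };

	if (strlen(nqn) > NVMF_NQN_SIZE)        /* too long to be a legal NQN */
		return -EINVAL;
	strncpy(field, nqn, NVMF_NQN_FIELD_LEN - 1);  /* rest of field stays zeroed */
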
enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
@@ -50,6 +122,13 @@ enum {
#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+/*
+ * Submission and Completion Queue Entry Sizes for the NVM command set.
+ * (In bytes and specified as a power of two (2^n)).
+ */
+#define NVME_NVM_IOSQES 6
+#define NVME_NVM_IOCQES 4
+
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
@@ -61,8 +140,8 @@ enum {
NVME_CC_SHN_NORMAL = 1 << 14,
NVME_CC_SHN_ABRUPT = 2 << 14,
NVME_CC_SHN_MASK = 3 << 14,
- NVME_CC_IOSQES = 6 << 16,
- NVME_CC_IOCQES = 4 << 20,
+ NVME_CC_IOSQES = NVME_NVM_IOSQES << 16,
+ NVME_CC_IOCQES = NVME_NVM_IOCQES << 20,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
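
With the queue entry sizes factored out into NVME_NVM_IOSQES/IOCQES, enabling
a controller for the NVM command set composes CC roughly as below (a sketch;
bar is a hypothetical ioremap'd register base, and the arbitration and page
size fields are left at their defaults):

	u32 ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM |
			  NVME_CC_IOSQES | NVME_CC_IOCQES;

	/* 1 << NVME_NVM_IOSQES = 64-byte SQEs, 1 << NVME_NVM_IOCQES = 16-byte CQEs */
	writel(ctrl_config, bar + NVME_REG_CC);
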
@@ -107,7 +186,11 @@ struct nvme_id_ctrl {
__u8 mdts;
__le16 cntlid;
__le32 ver;
- __u8 rsvd84[172];
+ __le32 rtd3r;
+ __le32 rtd3e;
+ __le32 oaes;
+ __le32 ctratt;
+ __u8 rsvd100[156];
__le16 oacs;
__u8 acl;
__u8 aerl;
@@ -119,10 +202,12 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
- __u8 rsvd270[242];
+ __u8 rsvd270[50];
+ __le16 kas;
+ __u8 rsvd322[190];
__u8 sqes;
__u8 cqes;
- __u8 rsvd514[2];
+ __le16 maxcmd;
__le32 nn;
__le16 oncs;
__le16 fuses;
@@ -135,7 +220,15 @@ struct nvme_id_ctrl {
__le16 acwu;
__u8 rsvd534[2];
__le32 sgls;
- __u8 rsvd540[1508];
+ __u8 rsvd540[228];
+ char subnqn[256];
+ __u8 rsvd1024[768];
+ __le32 ioccsz;
+ __le32 iorcsz;
+ __le16 icdoff;
+ __u8 ctrattr;
+ __u8 msdbd;
+ __u8 rsvd1804[244];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
@@ -274,6 +367,12 @@ struct nvme_reservation_status {
} regctl_ds[];
};
+enum nvme_async_event_type {
+ NVME_AER_TYPE_ERROR = 0,
+ NVME_AER_TYPE_SMART = 1,
+ NVME_AER_TYPE_NOTICE = 2,
+};
+
/* I/O commands */
enum nvme_opcode {
@@ -290,6 +389,84 @@ enum nvme_opcode {
nvme_cmd_resv_release = 0x15,
};
+/*
+ * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
+ *
+ * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
+ * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
+ * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
+ * request subtype
+ */
+enum {
+ NVME_SGL_FMT_ADDRESS = 0x00,
+ NVME_SGL_FMT_OFFSET = 0x01,
+ NVME_SGL_FMT_INVALIDATE = 0x0f,
+};
+
+/*
+ * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
+ *
+ * For struct nvme_sgl_desc:
+ * @NVME_SGL_FMT_DATA_DESC: data block descriptor
+ * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
+ * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
+ *
+ * For struct nvme_keyed_sgl_desc:
+ * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
+ */
+enum {
+ NVME_SGL_FMT_DATA_DESC = 0x00,
+ NVME_SGL_FMT_SEG_DESC = 0x02,
+ NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
+ NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
+};
+
+struct nvme_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+struct nvme_keyed_sgl_desc {
+ __le64 addr;
+ __u8 length[3];
+ __u8 key[4];
+ __u8 type;
+};
+
+union nvme_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct nvme_sgl_desc sgl;
+ struct nvme_keyed_sgl_desc ksgl;
+};
+
+/*
+ * Lowest two bits of our flags field (FUSE field in the spec):
+ *
+ * @NVME_CMD_FUSE_FIRST: Fused Operation, first command
+ * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
+ *
+ * Highest two bits in our flags field (PSDT field in the spec):
+ *
+ * @NVME_CMD_SGL_METABUF: Use SGLs for this transfer;
+ * if used, MPTR contains the address of a single physical buffer (byte aligned).
+ * @NVME_CMD_SGL_METASEG: Use SGLs for this transfer;
+ * if used, MPTR contains the address of an SGL segment containing
+ * exactly 1 SGL descriptor (qword aligned).
+ */
+enum {
+ NVME_CMD_FUSE_FIRST = (1 << 0),
+ NVME_CMD_FUSE_SECOND = (1 << 1),
+
+ NVME_CMD_SGL_METABUF = (1 << 6),
+ NVME_CMD_SGL_METASEG = (1 << 7),
+ NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
+};
+
struct nvme_common_command {
__u8 opcode;
__u8 flags;
@@ -297,8 +474,7 @@ struct nvme_common_command {
__le32 nsid;
__le32 cdw2[2];
__le64 metadata;
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le32 cdw10[6];
};
@@ -309,8 +485,7 @@ struct nvme_rw_command {
__le32 nsid;
__u64 rsvd2;
__le64 metadata;
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le64 slba;
__le16 length;
__le16 control;
@@ -350,8 +525,7 @@ struct nvme_dsm_cmd {
__u16 command_id;
__le32 nsid;
__u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le32 nr;
__le32 attributes;
__u32 rsvd12[4];
@@ -384,6 +558,7 @@ enum nvme_admin_opcode {
nvme_admin_async_event = 0x0c,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_keep_alive = 0x18,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
@@ -408,6 +583,7 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -415,6 +591,7 @@ enum {
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
NVME_FWACT_REPL_ACTV = (1 << 3),
@@ -427,8 +604,7 @@ struct nvme_identify {
__u16 command_id;
__le32 nsid;
__u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le32 cns;
__u32 rsvd11[5];
};
@@ -439,8 +615,7 @@ struct nvme_features {
__u16 command_id;
__le32 nsid;
__u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le32 fid;
__le32 dword11;
__u32 rsvd12[4];
@@ -499,8 +674,7 @@ struct nvme_download_firmware {
__u8 flags;
__u16 command_id;
__u32 rsvd1[5];
- __le64 prp1;
- __le64 prp2;
+ union nvme_data_ptr dptr;
__le32 numd;
__le32 offset;
__u32 rsvd12[4];
@@ -516,6 +690,143 @@ struct nvme_format_cmd {
__u32 rsvd11[5];
};
+struct nvme_get_log_page_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __u8 lid;
+ __u8 rsvd10;
+ __le16 numdl;
+ __le16 numdu;
+ __u16 rsvd11;
+ __le32 lpol;
+ __le32 lpou;
+ __u32 rsvd14[2];
+};
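
NUMDL/NUMDU split a 0's-based dword count across two fields. Fetching the
discovery log added below might look like this sketch (size is hypothetical
and assumed to be a multiple of four):

	struct nvme_command cmd = { };
	u32 numd = (size / 4) - 1;              /* 0's based dword count */

	cmd.get_log_page.opcode = nvme_admin_get_log_page;
	cmd.get_log_page.lid = NVME_LOG_DISC;
	cmd.get_log_page.numdl = cpu_to_le16(numd & 0xffff);
	cmd.get_log_page.numdu = cpu_to_le16(numd >> 16);
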
+
+/*
+ * Fabrics subcommands.
+ */
+enum nvmf_fabrics_opcode {
+ nvme_fabrics_command = 0x7f,
+};
+
+enum nvmf_capsule_command {
+ nvme_fabrics_type_property_set = 0x00,
+ nvme_fabrics_type_connect = 0x01,
+ nvme_fabrics_type_property_get = 0x04,
+};
+
+struct nvmf_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[35];
+ __u8 ts[24];
+};
+
+/*
+ * The legal cntlid range an NVMe Target will provide.
+ * Note that a cntlid of value 0 is considered illegal in the fabrics world;
+ * devices based on earlier specs, which lacked the subsystem concept,
+ * reported a cntlid of 0.
+ */
+#define NVME_CNTLID_MIN 1
+#define NVME_CNTLID_MAX 0xffef
+#define NVME_CNTLID_DYNAMIC 0xffff
+
+#define MAX_DISC_LOGS 255
+
+/* Discovery log page entry */
+struct nvmf_disc_rsp_page_entry {
+ __u8 trtype;
+ __u8 adrfam;
+ __u8 nqntype;
+ __u8 treq;
+ __le16 portid;
+ __le16 cntlid;
+ __le16 asqsz;
+ __u8 resv8[22];
+ char trsvcid[NVMF_TRSVCID_SIZE];
+ __u8 resv64[192];
+ char subnqn[NVMF_NQN_FIELD_LEN];
+ char traddr[NVMF_TRADDR_SIZE];
+ union tsas {
+ char common[NVMF_TSAS_SIZE];
+ struct rdma {
+ __u8 qptype;
+ __u8 prtype;
+ __u8 cms;
+ __u8 resv3[5];
+ __u16 pkey;
+ __u8 resv10[246];
+ } rdma;
+ } tsas;
+};
+
+/* Discovery log page header */
+struct nvmf_disc_rsp_page_hdr {
+ __le64 genctr;
+ __le64 numrec;
+ __le16 recfmt;
+ __u8 resv14[1006];
+ struct nvmf_disc_rsp_page_entry entries[0];
+};
+
+struct nvmf_connect_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __le16 recfmt;
+ __le16 qid;
+ __le16 sqsize;
+ __u8 cattr;
+ __u8 resv3;
+ __le32 kato;
+ __u8 resv4[12];
+};
+
+struct nvmf_connect_data {
+ uuid_be hostid;
+ __le16 cntlid;
+ char resv4[238];
+ char subsysnqn[NVMF_NQN_FIELD_LEN];
+ char hostnqn[NVMF_NQN_FIELD_LEN];
+ char resv5[256];
+};
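
A sketch of a Connect capsule for the Admin queue under these definitions
(kato in milliseconds, subsysnqn and hostnqn are hypothetical; allocation
failure handling is omitted). The data portion travels in-capsule via dptr:

	struct nvme_command cmd = { };
	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;                             /* Admin queue */
	cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
	cmd.connect.kato = cpu_to_le32(kato);            /* keep-alive timeout, ms */

	uuid_be_gen(&data->hostid);
	data->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); /* let the target assign one */
	strncpy(data->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, hostnqn, NVMF_NQN_SIZE);
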
+
+struct nvmf_property_set_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[35];
+ __u8 attrib;
+ __u8 resv3[3];
+ __le32 offset;
+ __le64 value;
+ __u8 resv4[8];
+};
+
+struct nvmf_property_get_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[35];
+ __u8 attrib;
+ __u8 resv3[3];
+ __le32 offset;
+ __u8 resv4[16];
+};
+
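
Property Get/Set replace MMIO register access over fabrics, with attrib
selecting the property size. Reading the 64-bit CAP register might look like
the sketch below; the value comes back in the 64-bit completion result
introduced at the end of this file's diff:

	struct nvme_command cmd = { };

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;                /* 1 = 8-byte property, 0 = 4-byte */
	cmd.prop_get.offset = cpu_to_le32(NVME_REG_CAP);
	/* on completion: val = le64_to_cpu(cqe->result64); */
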
struct nvme_command {
union {
struct nvme_common_command common;
@@ -529,10 +840,30 @@ struct nvme_command {
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
struct nvme_abort_cmd abort;
+ struct nvme_get_log_page_command get_log_page;
+ struct nvmf_common_command fabrics;
+ struct nvmf_connect_command connect;
+ struct nvmf_property_set_command prop_set;
+ struct nvmf_property_get_command prop_get;
};
};
+static inline bool nvme_is_write(struct nvme_command *cmd)
+{
+ /*
+ * What a mess...
+ *
+ * Why can't we simply have a Fabrics In and Fabrics out command?
+ */
+ if (unlikely(cmd->common.opcode == nvme_fabrics_command))
+ return cmd->fabrics.opcode & 1;
+ return cmd->common.opcode & 1;
+}
+
enum {
+ /*
+ * Generic Command Status:
+ */
NVME_SC_SUCCESS = 0x0,
NVME_SC_INVALID_OPCODE = 0x1,
NVME_SC_INVALID_FIELD = 0x2,
@@ -551,10 +882,18 @@ enum {
NVME_SC_SGL_INVALID_DATA = 0xf,
NVME_SC_SGL_INVALID_METADATA = 0x10,
NVME_SC_SGL_INVALID_TYPE = 0x11,
+
+ NVME_SC_SGL_INVALID_OFFSET = 0x16,
+ NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
NVME_SC_RESERVATION_CONFLICT = 0x83,
+
+ /*
+ * Command Specific Status:
+ */
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -572,9 +911,29 @@ enum {
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+
+ /*
+ * I/O Command Set Specific - NVM commands:
+ */
NVME_SC_BAD_ATTRIBUTES = 0x180,
NVME_SC_INVALID_PI = 0x181,
NVME_SC_READ_ONLY = 0x182,
+
+ /*
+ * I/O Command Set Specific - Fabrics commands:
+ */
+ NVME_SC_CONNECT_FORMAT = 0x180,
+ NVME_SC_CONNECT_CTRL_BUSY = 0x181,
+ NVME_SC_CONNECT_INVALID_PARAM = 0x182,
+ NVME_SC_CONNECT_RESTART_DISC = 0x183,
+ NVME_SC_CONNECT_INVALID_HOST = 0x184,
+
+ NVME_SC_DISCOVERY_RESTART = 0x190,
+ NVME_SC_AUTH_REQUIRED = 0x191,
+
+ /*
+ * Media and Data Integrity Errors:
+ */
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -582,12 +941,19 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
+
NVME_SC_DNR = 0x4000,
};
struct nvme_completion {
- __le32 result; /* Used by admin commands to return data */
- __u32 rsvd;
+ /*
+ * Used by Admin and Fabrics commands to return data:
+ */
+ union {
+ __le16 result16;
+ __le32 result;
+ __le64 result64;
+ };
__le16 sq_head; /* how much of this queue may be reclaimed */
__le16 sq_id; /* submission queue that generated this entry */
__u16 command_id; /* of the command which completed */
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 9bb77d3ed..c2256d746 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
{
}
-static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/of.h b/include/linux/of.h
index 74eb28cad..3d9ff8e9d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -238,13 +238,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#endif
-/* Default string compare functions, Allow arch asm/prom.h to override */
-#if !defined(of_compat_cmp)
-#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
-#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
-#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -324,6 +317,8 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
+extern int of_device_compatible_match(struct device_node *device,
+ const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
@@ -726,6 +721,13 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
+/* Default string compare functions. Allow arch asm/prom.h to override. */
+#if !defined(of_compat_cmp)
+#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+#endif
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
@@ -1009,10 +1011,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
+typedef int (*of_init_fn_1_ret)(struct device_node *);
typedef void (*of_init_fn_1)(struct device_node *);
#define OF_DECLARE_1(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_1)
+#define OF_DECLARE_1_RET(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
#define OF_DECLARE_2(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_2)
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 901ec01c9..26c3302ae 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -53,6 +53,8 @@ extern char __dtb_end[];
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
void *data);
+extern int of_get_flat_dt_subnode_by_name(unsigned long node,
+ const char *uname);
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index bd02b4490..e80b9c762 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -11,7 +11,6 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
-extern void of_iommu_init(void);
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np);
@@ -24,7 +23,6 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL;
}
-static inline void of_iommu_init(void) { }
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 8f2237eb3..2ab233661 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -19,12 +19,17 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
struct device_node *phy_np,
void (*hndlr)(struct net_device *),
u32 flags, phy_interface_t iface);
+extern struct phy_device *
+of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
+ void (*hndlr)(struct net_device *));
struct phy_device *of_phy_attach(struct net_device *dev,
struct device_node *phy_np, u32 flags,
phy_interface_t iface);
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
+extern int of_phy_register_fixed_link(struct device_node *np);
+extern bool of_phy_is_fixed_link(struct device_node *np);
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -50,6 +55,13 @@ static inline struct phy_device *of_phy_connect(struct net_device *dev,
return NULL;
}
+static inline struct phy_device *
+of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
+ void (*hndlr)(struct net_device *))
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_attach(struct net_device *dev,
struct device_node *phy_np,
u32 flags, phy_interface_t iface)
@@ -67,12 +79,6 @@ static inline int of_mdio_parse_addr(struct device *dev,
{
return -ENOSYS;
}
-#endif /* CONFIG_OF */
-
-#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
-extern int of_phy_register_fixed_link(struct device_node *np);
-extern bool of_phy_is_fixed_link(struct device_node *np);
-#else
static inline int of_phy_register_fixed_link(struct device_node *np)
{
return -ENOSYS;
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index c201060e0..f8e1992d6 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -1,7 +1,8 @@
#ifndef __OF_RESERVED_MEM_H
#define __OF_RESERVED_MEM_H
-struct device;
+#include <linux/device.h>
+
struct of_phandle_args;
struct reserved_mem_ops;
@@ -28,7 +29,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
#ifdef CONFIG_OF_RESERVED_MEM
-int of_reserved_mem_device_init(struct device *dev);
+
+int of_reserved_mem_device_init_by_idx(struct device *dev,
+ struct device_node *np, int idx);
void of_reserved_mem_device_release(struct device *dev);
int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
@@ -42,7 +45,8 @@ void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
#else
-static inline int of_reserved_mem_device_init(struct device *dev)
+static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
+ struct device_node *np, int idx)
{
return -ENOSYS;
}
@@ -53,4 +57,19 @@ static inline void fdt_reserved_mem_save_node(unsigned long node,
const char *uname, phys_addr_t base, phys_addr_t size) { }
#endif
+/**
+ * of_reserved_mem_device_init() - assign reserved memory region to given device
+ * @dev: Pointer to the device to configure
+ *
+ * This function assigns the respective DMA-mapping operations based on the
+ * first reserved memory region specified by the 'memory-region' property in
+ * the device tree node of the given device.
+ *
+ * Returns error code or zero on success.
+ */
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+ return of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
+}
+
#endif /* __OF_RESERVED_MEM_H */
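
Typical driver usage of the new inline wrapper, as a sketch (foo_probe and
the error policy are hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* attach the first 'memory-region' entry, if the node has one */
		ret = of_reserved_mem_device_init(&pdev->dev);
		if (ret && ret != -ENODEV)
			return ret;
		/* ... on remove: of_reserved_mem_device_release(&pdev->dev); */
		return 0;
	}
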
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 834695226..5bc0457ee 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -23,6 +23,9 @@ struct oom_control {
/* Used to determine mempolicy */
nodemask_t *nodemask;
+ /* Memory cgroup in which oom is invoked, or NULL for global oom */
+ struct mem_cgroup *memcg;
+
/* Used to determine cpuset and node locality requirement */
const gfp_t gfp_mask;
@@ -70,9 +73,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
extern void mark_oom_victim(struct task_struct *tsk);
#ifdef CONFIG_MMU
-extern void try_oom_reaper(struct task_struct *tsk);
+extern void wake_oom_reaper(struct task_struct *tsk);
#else
-static inline void try_oom_reaper(struct task_struct *tsk)
+static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif
@@ -83,14 +86,13 @@ extern unsigned long oom_badness(struct task_struct *p,
extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
unsigned int points, unsigned long totalpages,
- struct mem_cgroup *memcg, const char *message);
+ const char *message);
extern void check_panic_on_oom(struct oom_control *oc,
- enum oom_constraint constraint,
- struct mem_cgroup *memcg);
+ enum oom_constraint constraint);
extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
- struct task_struct *task, unsigned long totalpages);
+ struct task_struct *task);
extern bool out_of_memory(struct oom_control *oc);
@@ -105,27 +107,7 @@ extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-static inline bool task_will_free_mem(struct task_struct *task)
-{
- struct signal_struct *sig = task->signal;
-
- /*
- * A coredumping process may sleep for an extended period in exit_mm(),
- * so the oom killer cannot assume that the process will promptly exit
- * and release memory.
- */
- if (sig->flags & SIGNAL_GROUP_COREDUMP)
- return false;
-
- if (!(task->flags & PF_EXITING))
- return false;
-
- /* Make sure that the whole thread group is going down */
- if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT))
- return false;
-
- return true;
-}
+bool task_will_free_mem(struct task_struct *task);
/* sysctls */
extern int sysctl_oom_dump_tasks;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e8041839a..74e4dda91 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -101,12 +101,6 @@ enum pageflags {
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#ifdef CONFIG_TOI_INCREMENTAL
- PG_toi_untracked, /* Don't track dirtiness of this page - assume always dirty */
- PG_toi_ro, /* Page was made RO by TOI */
- PG_toi_cbw, /* Copy the page before it is written to */
- PG_toi_dirty, /* Page has been modified */
-#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
@@ -135,6 +129,9 @@ enum pageflags {
/* Compound pages. Stored in first tail page's flags */
PG_double_map = PG_private_2,
+
+ /* non-lru isolated movable page */
+ PG_isolated = PG_reclaim,
};
#ifndef __GENERATING_BOUNDS_H
@@ -298,11 +295,11 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
-PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND)
+PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
-PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
+ TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
@@ -349,17 +346,6 @@ TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
-#ifdef CONFIG_TOI_INCREMENTAL
-PAGEFLAG(TOI_RO, toi_ro)
-PAGEFLAG(TOI_Dirty, toi_dirty)
-PAGEFLAG(TOI_Untracked, toi_untracked)
-PAGEFLAG(TOI_CBW, toi_cbw)
-#else
-PAGEFLAG_FALSE(TOI_RO)
-PAGEFLAG_FALSE(TOI_Dirty)
-PAGEFLAG_FALSE(TOI_Untracked)
-PAGEFLAG_FALSE(TOI_CBW)
-#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
@@ -374,29 +360,37 @@ PAGEFLAG(Idle, idle, PF_ANY)
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
* On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
+ * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * bit; and then page->mapping points, not to an anon_vma, but to a private
* structure which KSM associates with that merged page. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
+ * page; in that case page->mapping points to a struct address_space.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+#define PAGE_MAPPING_ANON 0x1
+#define PAGE_MAPPING_MOVABLE 0x2
+#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-static __always_inline int PageAnonHead(struct page *page)
+static __always_inline int PageMappingFlags(struct page *page)
{
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
static __always_inline int PageAnon(struct page *page)
{
page = compound_head(page);
- return PageAnonHead(page);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline int __PageMovable(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
}
#ifdef CONFIG_KSM
@@ -410,7 +404,7 @@ static __always_inline int PageKsm(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
@@ -587,6 +581,17 @@ static inline int PageDoubleMap(struct page *page)
return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}
+static inline void SetPageDoubleMap(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ set_bit(PG_double_map, &page[1].flags);
+}
+
+static inline void ClearPageDoubleMap(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ clear_bit(PG_double_map, &page[1].flags);
+}
static inline int TestSetPageDoubleMap(struct page *page)
{
VM_BUG_ON_PAGE(!PageHead(page), page);
@@ -604,59 +609,59 @@ TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
-TESTPAGEFLAG_FALSE(DoubleMap)
+PAGEFLAG_FALSE(DoubleMap)
TESTSETFLAG_FALSE(DoubleMap)
TESTCLEARFLAG_FALSE(DoubleMap)
#endif
/*
+ * For pages that are never mapped to userspace, page->mapcount may be
+ * used for storing extra information about page type. Any value used
+ * for this purpose must be <= -2, but it's better not to start too close
+ * to -2 so that an underflow of page_mapcount() won't be mistaken
+ * for a special page.
+ */
+#define PAGE_MAPCOUNT_OPS(uname, lname) \
+static __always_inline int Page##uname(struct page *page) \
+{ \
+ return atomic_read(&page->_mapcount) == \
+ PAGE_##lname##_MAPCOUNT_VALUE; \
+} \
+static __always_inline void __SetPage##uname(struct page *page) \
+{ \
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \
+ atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \
+} \
+static __always_inline void __ClearPage##uname(struct page *page) \
+{ \
+ VM_BUG_ON_PAGE(!Page##uname(page), page); \
+ atomic_set(&page->_mapcount, -1); \
+}
+
+/*
* PageBuddy() indicate that the page is free and in the buddy system
* (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
*/
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
-static inline void __SetPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
+/*
+ * PageBalloon() is set on pages that are on the balloon page list
+ * (see mm/balloon_compaction.c).
+ */
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
-static inline void __ClearPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBuddy(page), page);
- atomic_set(&page->_mapcount, -1);
-}
+/*
+ * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
+ * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
+ */
+#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
+PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
extern bool is_free_buddy_page(struct page *page);
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBalloon(page), page);
- atomic_set(&page->_mapcount, -1);
-}
+__PAGEFLAG(Isolated, isolated, PF_ANY);
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
@@ -711,12 +716,8 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
* alloc-free cycle to prevent from reusing the page.
*/
-#ifdef CONFIG_TOI_INCREMENTAL
-#define PAGE_FLAGS_CHECK_AT_PREP (((1 << NR_PAGEFLAGS) - 1) & \
- ~((1 << PG_toi_dirty) | (1 << PG_toi_ro) | __PG_HWPOISON))
-#else
-#define PAGE_FLAGS_CHECK_AT_PREP (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
-#endif
+#define PAGE_FLAGS_CHECK_AT_PREP \
+ (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index e1fe7cf5b..03f2a3e7d 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
struct pglist_data;
struct page_ext_operations {
@@ -44,9 +45,8 @@ struct page_ext {
#ifdef CONFIG_PAGE_OWNER
unsigned int order;
gfp_t gfp_mask;
- unsigned int nr_entries;
int last_migrate_reason;
- unsigned long trace_entries[8];
+ depot_stack_handle_t handle;
#endif
};
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 46f1b9399..30583ab0f 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
-extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __split_page_owner(struct page *page, unsigned int order);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(struct page *page);
@@ -28,12 +28,10 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page, unsigned int order)
{
if (static_branch_unlikely(&page_owner_inited))
- return __get_page_owner_gfp(page);
- else
- return 0;
+ __split_page_owner(page, order);
}
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
{
@@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page,
+ unsigned int order)
{
- return 0;
}
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
{
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 8b5e0a9f2..610e13271 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -124,6 +124,15 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
return ret;
}
+static inline int page_ref_inc_return(struct page *page)
+{
+ int ret = atomic_inc_return(&page->_refcount);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, 1, ret);
+ return ret;
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1f94053b3..01e84436c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
-static inline struct page *page_cache_alloc_readahead(struct address_space *x)
+static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
- return __page_cache_alloc(mapping_gfp_mask(x) |
- __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
+ return mapping_gfp_mask(x) |
+ __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}
typedef int filler_t(void *, struct page *);
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
-void page_endio(struct page *page, int rw, int err);
+void page_endio(struct page *page, bool is_write, int err);
/*
* Add an arbitrary waiter to a page's wait queue
@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
return __get_user(c, end);
}
+ (void)c;
return 0;
}
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 89ab0572d..7d63a66e8 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -24,6 +24,8 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
}
extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
+extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res);
+
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
struct pci_bus *pbus = pdev->bus;
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
new file mode 100644
index 000000000..7adad206b
--- /dev/null
+++ b/include/linux/pci-ecam.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+#ifndef DRIVERS_PCI_ECAM_H
+#define DRIVERS_PCI_ECAM_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+/*
+ * struct to hold pci ops and bus shift of the config window
+ * for a PCI controller.
+ */
+struct pci_config_window;
+struct pci_ecam_ops {
+ unsigned int bus_shift;
+ struct pci_ops pci_ops;
+ int (*init)(struct pci_config_window *);
+};
+
+/*
+ * struct to hold the mappings of a config space window. This
+ * is expected to be used as sysdata for PCI controllers that
+ * use ECAM.
+ */
+struct pci_config_window {
+ struct resource res;
+ struct resource busr;
+ void *priv;
+ struct pci_ecam_ops *ops;
+ union {
+ void __iomem *win; /* 64-bit single mapping */
+ void __iomem **winp; /* 32-bit per-bus mapping */
+ };
+ struct device *parent; /* ECAM res was from this dev */
+};
+
+/* create and free pci_config_window */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+ struct resource *cfgres, struct resource *busr,
+ struct pci_ecam_ops *ops);
+void pci_ecam_free(struct pci_config_window *cfg);
+
+/* map_bus when ->sysdata is an instance of pci_config_window */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+/* default ECAM ops */
+extern struct pci_ecam_ops pci_generic_ecam_ops;
+
+#ifdef CONFIG_PCI_HOST_GENERIC
+/* for DT-based PCI controllers that support ECAM */
+int pci_host_common_probe(struct platform_device *pdev,
+ struct pci_ecam_ops *ops);
+#endif
+#endif
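
A host-bridge driver is expected to use this roughly as follows (a sketch;
dev, cfgres and busr are assumed to come from the driver's resources):

	struct pci_config_window *cfg;

	cfg = pci_ecam_create(dev, cfgres, busr, &pci_generic_ecam_ops);
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);
	/*
	 * cfg becomes the bridge's sysdata; config space accessors come from
	 * cfg->ops->pci_ops, which route through pci_ecam_map_bus().
	 */
	/* ... scan the root bus with cfg as sysdata, then on teardown: */
	pci_ecam_free(cfg);
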
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b67e4df20..0ab835965 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -101,6 +101,10 @@ enum {
DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
+/*
+ * pci_power_t values must match the bits in the Capabilities PME_Support
+ * and Control/Status PowerState fields in the Power Management capability.
+ */
typedef int __bitwise pci_power_t;
#define PCI_D0 ((pci_power_t __force) 0)
@@ -116,7 +120,7 @@ extern const char *pci_power_names[];
static inline const char *pci_power_name(pci_power_t state)
{
- return pci_power_names[1 + (int) state];
+ return pci_power_names[1 + (__force int) state];
}
#define PCI_PM_D2_DELAY 200
@@ -294,6 +298,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
unsigned int no_d3cold:1; /* D3cold is forbidden */
+ unsigned int bridge_d3:1; /* Allow D3 for bridge */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
unsigned int mmio_always_on:1; /* disallow turning off io/mem
decoding during bar sizing */
@@ -320,6 +325,7 @@ struct pci_dev {
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
+ struct cpumask *irq_affinity;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
bool match_driver; /* Skip attaching driver */
@@ -677,15 +683,6 @@ struct pci_driver {
#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
- const struct pci_device_id _table[]
-
-/**
* PCI_DEVICE - macro used to describe a specific pci device
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
@@ -854,6 +851,7 @@ void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
+void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
@@ -1083,6 +1081,8 @@ int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
+void pci_d3cold_enable(struct pci_dev *dev);
+void pci_d3cold_disable(struct pci_dev *dev);
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
@@ -1114,6 +1114,7 @@ int pci_set_vpd_size(struct pci_dev *dev, size_t len);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
+void pci_bus_claim_resources(struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
@@ -1143,9 +1144,12 @@ void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+int devm_request_pci_bus_resources(struct device *dev,
+ struct list_head *resources);
#define pci_bus_for_each_resource(bus, res, i) \
for (i = 0; \
@@ -1167,6 +1171,7 @@ int pci_register_io_range(phys_addr_t addr, resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+void pci_unmap_iospace(struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1237,6 +1242,13 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
+#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
+
/* kmem_cache style wrapper around pci_alloc_consistent() */
#include <linux/pci-dma.h>
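
These flags feed the new pci_alloc_irq_vectors() API declared further down,
which hides the MSI-X/MSI/legacy fallback from drivers. A sketch of the
intended calling convention (foo_irq_handler and the vector bound of 8 are
hypothetical):

	int i, ret, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
				     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
				  0, "foo", pdev);
		if (ret)
			return ret;     /* real code would unwind first */
	}
	/* ... on teardown: free_irq() each vector, then pci_free_irq_vectors(pdev) */
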
@@ -1284,6 +1296,11 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
+void pci_free_irq_vectors(struct pci_dev *dev);
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_msi_shutdown(struct pci_dev *dev) { }
@@ -1307,6 +1324,24 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
static inline int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
+static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
+ unsigned int min_vecs, unsigned int max_vecs,
+ unsigned int flags)
+{
+ if (min_vecs > 1)
+ return -EINVAL;
+ return 1;
+}
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+}
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ if (WARN_ON_ONCE(nr > 0))
+ return -EINVAL;
+ return dev->irq;
+}
#endif
#ifdef CONFIG_PCIEPORTBUS
@@ -1389,12 +1424,13 @@ static inline int pci_domain_nr(struct pci_bus *bus)
{
return bus->domain_nr;
}
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#ifdef CONFIG_ACPI
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
#else
-static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
- struct device *parent)
-{
-}
+static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{ return 0; }
+#endif
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
#endif
/* some architectures require additional setup to direct VGA traffic */
@@ -1402,6 +1438,34 @@ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
void pci_register_set_vga_state(arch_set_vga_state_t func);
+static inline int
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+}
+
#else /* CONFIG_PCI is not enabled */
static inline void pci_set_flags(int flags) { }
@@ -1554,7 +1618,11 @@ static inline const char *pci_name(const struct pci_dev *pdev)
/* Some archs don't want to expose struct resource to userland as-is
* in sysfs and /proc
*/
-#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc,
+ resource_size_t *start, resource_size_t *end);
+#else
static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc, resource_size_t *start,
resource_size_t *end)
@@ -1706,6 +1774,7 @@ extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_bus_size;
/* Architecture-specific versions may override these (weak) */
void pcibios_disable_device(struct pci_dev *dev);
@@ -1722,7 +1791,7 @@ void pcibios_free_irq(struct pci_dev *dev);
extern struct dev_pm_ops pcibios_pm_ops;
#endif
-#ifdef CONFIG_PCI_MMCONFIG
+#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
#else
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 84f542df7..1c7eec09e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* used as a pointer. If the compiler generates a separate fetch
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
- * ACCESS_ONCE() is required when fetching it.
- *
- * Also, we need a data dependency barrier to be paired with
- * smp_store_release() in __percpu_ref_switch_to_percpu().
- *
- * Use lockless deref which contains both.
+ * READ_ONCE() is required when fetching it.
*/
- percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+ percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
+
+ /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
+ smp_read_barrier_depends();
/*
* Theoretically, the following could test just ATOMIC; however,
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index d28ac05c7..e18843809 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -109,7 +109,7 @@ struct arm_pmu {
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
- struct notifier_block hotplug_nb;
+ struct list_head entry;
struct notifier_block cpu_pm_nb;
};
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a827cecd..2b6b43cc0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -69,9 +69,22 @@ struct perf_callchain_entry_ctx {
bool contexts_maxed;
};
+typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
+ unsigned long off, unsigned long len);
+
+struct perf_raw_frag {
+ union {
+ struct perf_raw_frag *next;
+ unsigned long pad;
+ };
+ perf_copy_f copy;
+ void *data;
+ u32 size;
+} __packed;
+
struct perf_raw_record {
+ struct perf_raw_frag frag;
u32 size;
- void *data;
};
/*
@@ -517,6 +530,11 @@ struct swevent_hlist {
struct perf_cgroup;
struct ring_buffer;
+struct pmu_event_list {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -675,6 +693,7 @@ struct perf_event {
int cgrp_defer_enabled;
#endif
+ struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -724,7 +743,9 @@ struct perf_event_context {
u64 parent_gen;
u64 generation;
int pin_count;
+#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
+#endif
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
};
@@ -750,7 +771,9 @@ struct perf_cpu_context {
unsigned int hrtimer_active;
struct pmu *unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
+#endif
};
struct perf_output_handle {
@@ -1074,7 +1097,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
-extern int get_callchain_buffers(void);
+extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern int sysctl_perf_event_max_stack;
@@ -1283,42 +1306,12 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
-
-/*
- * This has to have a higher priority than migration_notifier in sched/core.c.
- */
-#define perf_cpu_notifier(fn) \
-do { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- unsigned long cpu = smp_processor_id(); \
- unsigned long flags; \
- \
- cpu_notifier_register_begin(); \
- fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
- (void *)(unsigned long)cpu); \
- local_irq_save(flags); \
- fn(&fn##_nb, (unsigned long)CPU_STARTING, \
- (void *)(unsigned long)cpu); \
- local_irq_restore(flags); \
- fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
- (void *)(unsigned long)cpu); \
- __register_cpu_notifier(&fn##_nb); \
- cpu_notifier_register_done(); \
-} while (0)
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
-/*
- * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
- * callback for already online CPUs.
- */
-#define __perf_cpu_notifier(fn) \
-do { \
- static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- \
- __register_cpu_notifier(&fn##_nb); \
-} while (0)
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
struct device_attribute attr;
@@ -1326,6 +1319,13 @@ struct perf_pmu_events_attr {
const char *event_str;
};
+struct perf_pmu_events_ht_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
+};
+
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
@@ -1354,4 +1354,13 @@ _name##_show(struct device *dev, \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
+/* Performance counter hotplug functions */
+#ifdef CONFIG_PERF_EVENTS
+int perf_event_init_cpu(unsigned int cpu);
+int perf_event_exit_cpu(unsigned int cpu);
+#else
+#define perf_event_init_cpu NULL
+#define perf_event_exit_cpu NULL
+#endif
+
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 94994810c..a3d90b9da 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -28,7 +28,10 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}
-extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
+static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
+{
+ return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
+}
static inline bool pfn_t_has_page(pfn_t pfn)
{
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index a810f2a18..f08b67238 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -22,12 +22,20 @@
struct phy;
+enum phy_mode {
+ PHY_MODE_INVALID,
+ PHY_MODE_USB_HOST,
+ PHY_MODE_USB_DEVICE,
+ PHY_MODE_USB_OTG,
+};
+
/**
* struct phy_ops - set of function pointers for performing phy operations
* @init: operation to be performed for initializing phy
* @exit: operation to be performed while exiting
* @power_on: powering on the phy
* @power_off: powering off the phy
+ * @set_mode: set the mode of the phy
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -35,6 +43,7 @@ struct phy_ops {
int (*exit)(struct phy *phy);
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
+ int (*set_mode)(struct phy *phy, enum phy_mode mode);
struct module *owner;
};
@@ -126,6 +135,7 @@ int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
+int phy_set_mode(struct phy *phy, enum phy_mode mode);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -233,6 +243,13 @@ static inline int phy_power_off(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
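
Consumers call the new accessor after powering on the phy; a minimal sketch:

	ret = phy_set_mode(phy, PHY_MODE_USB_HOST);
	if (ret)
		return ret;
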
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d921afd5f..12343caa1 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -175,6 +175,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
unsigned *num_maps, enum pinctrl_map_type type);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps);
static inline int pinconf_generic_dt_node_to_map_group(
struct pinctrl_dev *pctldev, struct device_node *np_config,
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
index 3c73c045f..e68454325 100644
--- a/include/linux/platform_data/asoc-ti-mcbsp.h
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -44,7 +44,7 @@ struct omap_mcbsp_platform_data {
/* McBSP platform and instance specific features */
bool has_wakeup; /* Wakeup capability */
bool has_ccr; /* Transceiver has configuration control registers */
- int (*enable_st_clock)(unsigned int, bool);
+ int (*force_ick_on)(struct clk *clk, bool force_on);
};
/**
@@ -55,4 +55,6 @@ struct omap_mcbsp_dev_attr {
const char *sidetone;
};
+void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata);
+
#endif
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index be830b141..271a4e25a 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/nvmem-consumer.h>
+#include <linux/bitops.h>
/**
* struct at24_platform_data - data to set up at24 (generic eeprom) driver
@@ -43,10 +44,12 @@ struct at24_platform_data {
u32 byte_len; /* size (sum of all addr) */
u16 page_size; /* for writes */
u8 flags;
-#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */
-#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
-#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
-#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
+#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */
+#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */
+#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */
+#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */
+#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */
+#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */
void (*setup)(struct nvmem_device *nvmem, void *context);
void *context;
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
new file mode 100644
index 000000000..69d279c0d
--- /dev/null
+++ b/include/linux/platform_data/b53.h
@@ -0,0 +1,33 @@
+/*
+ * B53 platform data
+ *
+ * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_H
+#define __B53_H
+
+#include <linux/kernel.h>
+
+struct b53_platform_data {
+ u32 chip_id;
+ u16 enabled_ports;
+
+ /* only used by MMAP'd driver */
+ unsigned big_endian:1;
+ void __iomem *regs;
+};
+
+#endif
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 3038120ca..812d87307 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -2,10 +2,7 @@
#define _LIRC_RX51_H
struct lirc_rx51_platform_data {
- int pwm_timer;
-
int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
- struct pwm_omap_dmtimer_pdata *dmtimer;
};
#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 95ccab3f4..7daa78a2f 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -46,5 +46,6 @@ struct esdhc_platform_data {
bool support_vsel;
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
+ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
new file mode 100644
index 000000000..679177929
--- /dev/null
+++ b/include/linux/platform_data/omapdss.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAPDSS_PDATA_H
+#define __OMAPDSS_PDATA_H
+
+enum omapdss_version {
+ OMAPDSS_VER_UNKNOWN = 0,
+ OMAPDSS_VER_OMAP24xx,
+ OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */
+ OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */
+ OMAPDSS_VER_OMAP3630,
+ OMAPDSS_VER_AM35xx,
+ OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */
+ OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */
+ OMAPDSS_VER_OMAP4, /* All other OMAP4s */
+ OMAPDSS_VER_OMAP5,
+ OMAPDSS_VER_AM43xx,
+ OMAPDSS_VER_DRA7xx,
+};
+
+/* Board specific data */
+struct omap_dss_board_info {
+ const char *default_display_name;
+ int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask);
+ void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask);
+ int (*set_min_bus_tput)(struct device *dev, unsigned long r);
+ enum omapdss_version version;
+};
+
+#endif /* __OMAPDSS_PDATA_H */
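A hedged board-file sketch filling struct omap_dss_board_info; the display name is a placeholder, and the pad/bus-throughput callbacks are left unset for brevity:

#include <linux/platform_data/omapdss.h>

static struct omap_dss_board_info board_dss_data = {
	.default_display_name	= "lcd",		/* placeholder */
	.version		= OMAPDSS_VER_OMAP3630,
	/* .dsi_enable_pads / .set_min_bus_tput would be SoC-specific */
};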
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
new file mode 100644
index 000000000..22c538255
--- /dev/null
+++ b/include/linux/platform_data/rtc-ds2404.h
@@ -0,0 +1,20 @@
+/*
+ * ds2404.h - platform data structure for the DS2404 RTC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
+ */
+
+#ifndef __LINUX_DS2404_H
+#define __LINUX_DS2404_H
+
+struct ds2404_platform_data {
+ unsigned int gpio_rst;
+ unsigned int gpio_clk;
+ unsigned int gpio_dq;
+};
+#endif
diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h
new file mode 100644
index 000000000..915d6b4f0
--- /dev/null
+++ b/include/linux/platform_data/rtc-m48t86.h
@@ -0,0 +1,16 @@
+/*
+ * ST M48T86 / Dallas DS12887 RTC driver
+ * Copyright (c) 2006 Tower Technologies
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct m48t86_ops {
+ void (*writebyte)(unsigned char value, unsigned long addr);
+ unsigned char (*readbyte)(unsigned long addr);
+};
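Board code supplies bus accessors through struct m48t86_ops. A sketch assuming a hypothetical memory-mapped layout with the index register at the base address and the data register one byte above it:

#include <linux/io.h>

static void __iomem *m48t86_base;	/* mapped during board setup (assumed) */

static void board_m48t86_writebyte(unsigned char value, unsigned long addr)
{
	writeb(addr, m48t86_base);	/* select the RTC register */
	writeb(value, m48t86_base + 1);	/* write its value */
}

static unsigned char board_m48t86_readbyte(unsigned long addr)
{
	writeb(addr, m48t86_base);
	return readb(m48t86_base + 1);
}

static struct m48t86_ops board_m48t86_ops = {
	.writebyte	= board_m48t86_writebyte,
	.readbyte	= board_m48t86_readbyte,
};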
diff --git a/include/linux/platform_data/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h
new file mode 100644
index 000000000..e55d82ceb
--- /dev/null
+++ b/include/linux/platform_data/rtc-v3020.h
@@ -0,0 +1,41 @@
+/*
+ * v3020.h - Registers definition and platform data structure for the v3020 RTC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 8D Technologies inc.
+ */
+#ifndef __LINUX_V3020_H
+#define __LINUX_V3020_H
+
+/* The v3020 has only one data pin, but which one
+ * is used depends on the board. */
+struct v3020_platform_data {
+ int leftshift; /* (1<<(leftshift)) & readl() */
+
+ unsigned int use_gpio:1;
+ unsigned int gpio_cs;
+ unsigned int gpio_wr;
+ unsigned int gpio_rd;
+ unsigned int gpio_io;
+};
+
+#define V3020_STATUS_0 0x00
+#define V3020_STATUS_1 0x01
+#define V3020_SECONDS 0x02
+#define V3020_MINUTES 0x03
+#define V3020_HOURS 0x04
+#define V3020_MONTH_DAY 0x05
+#define V3020_MONTH 0x06
+#define V3020_YEAR 0x07
+#define V3020_WEEK_DAY 0x08
+#define V3020_WEEK 0x09
+
+#define V3020_IS_COMMAND(val) ((val)>=0x0E)
+
+#define V3020_CMD_RAM2CLOCK 0x0E
+#define V3020_CMD_CLOCK2RAM 0x0F
+
+#endif /* __LINUX_V3020_H */
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
new file mode 100644
index 000000000..2e5eea358
--- /dev/null
+++ b/include/linux/platform_data/sht3x.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHT3X_H_
+#define __SHT3X_H_
+
+struct sht3x_platform_data {
+ bool blocking_io;
+ bool high_precision;
+};
+#endif /* __SHT3X_H_ */
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index fb5625bcc..5c1e21c87 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -38,6 +38,7 @@ struct s3c64xx_spi_csinfo {
struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
+ bool no_cs;
int (*cfg_gpio)(void);
dma_filter_fn filter;
void *dma_tx;
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
index 817dfdb37..6f0fb6ebd 100644
--- a/include/linux/platform_data/st33zp24.h
+++ b/include/linux/platform_data/st33zp24.h
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
- * Copyright (C) 2009 - 2015 STMicroelectronics
+ * Copyright (C) 2009 - 2016 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 308d6044f..09779b0ae 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -42,6 +42,7 @@ extern int pm_clk_create(struct device *dev);
extern void pm_clk_destroy(struct device *dev);
extern int pm_clk_add(struct device *dev, const char *con_id);
extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
+extern int of_pm_clk_add_clk(struct device *dev, const char *name);
extern int of_pm_clk_add_clks(struct device *dev);
extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
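The new of_pm_clk_add_clk() adds a single clock, looked up by name in the device's DT node, to the device's PM clock list. A sketch, assuming the node carries a clock-names entry called "fck":

#include <linux/pm_clock.h>

static int foo_setup_pm_clocks(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);
	if (ret)
		return ret;

	ret = of_pm_clk_add_clk(dev, "fck");	/* "fck" is an assumed name */
	if (ret)
		pm_clk_destroy(dev);

	return ret;
}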
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 39285c7bd..31fec8580 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -57,7 +57,6 @@ struct generic_pm_domain {
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
- bool suspend_power_off; /* Power status before system suspend */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct gpd_dev_ops dev_ops;
@@ -128,8 +127,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
-extern void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -164,9 +163,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off)
+static inline int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
{
+ return -ENOSYS;
}
#endif
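Since pm_genpd_init() now returns an int (and the stub returns -ENOSYS when genpd support is disabled), callers should check the result. A minimal sketch with no-op power callbacks:

#include <linux/pm_domain.h>
#include <linux/printk.h>

static int my_pd_power_on(struct generic_pm_domain *pd)  { return 0; }
static int my_pd_power_off(struct generic_pm_domain *pd) { return 0; }

static struct generic_pm_domain my_pd = {
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static int my_pd_register(void)
{
	int ret = pm_genpd_init(&my_pd, &simple_qos_governor, true);

	if (ret)
		pr_err("pm_genpd_init() failed: %d\n", ret);
	return ret;
}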
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 57d146fe4..e856c2cb0 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -26,47 +26,35 @@
* calling these symbols with arch_has_pmem_api() and redirect to the
* implementation in asm/pmem.h.
*/
-static inline bool __arch_has_wmb_pmem(void)
-{
- return false;
-}
-
-static inline void arch_wmb_pmem(void)
-{
- BUG();
-}
-
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
- size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
- size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
{
BUG();
return -EFAULT;
}
-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
BUG();
return 0;
}
-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
{
BUG();
}
-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
BUG();
}
-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
{
BUG();
}
@@ -77,13 +65,6 @@ static inline bool arch_has_pmem_api(void)
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
-static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
- size_t size)
-{
- memcpy(dst, (void __force *) src, size);
- return 0;
-}
-
/*
* memcpy_from_pmem - read from persistent memory with error handling
* @dst: destination buffer
@@ -92,54 +73,13 @@ static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
*
 * Returns 0 on success, negative error code on failure.
*/
-static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
- size_t size)
+static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
{
if (arch_has_pmem_api())
return arch_memcpy_from_pmem(dst, src, size);
else
- return default_memcpy_from_pmem(dst, src, size);
-}
-
-/**
- * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
- *
- * For a given cpu implementation within an architecture it is possible
- * that wmb_pmem() resolves to a nop. In the case this returns
- * false, pmem api users are unable to ensure durability and may want to
- * fall back to a different data consistency model, or otherwise notify
- * the user.
- */
-static inline bool arch_has_wmb_pmem(void)
-{
- return arch_has_pmem_api() && __arch_has_wmb_pmem();
-}
-
-/*
- * These defaults seek to offer decent performance and minimize the
- * window between i/o completion and writes being durable on media.
- * However, it is undefined / architecture specific whether
- * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
- * making data durable relative to i/o completion.
- */
-static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
- size_t size)
-{
- memcpy((void __force *) dst, src, size);
-}
-
-static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
- size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter_nocache((void __force *)addr, bytes, i);
-}
-
-static inline void default_clear_pmem(void __pmem *addr, size_t size)
-{
- if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
- clear_page((void __force *)addr);
- else
- memset((void __force *)addr, 0, size);
+ memcpy(dst, src, size);
+ return 0;
}
/**
@@ -152,29 +92,14 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
* being effectively evicted from, or never written to, the processor
* cache hierarchy after the copy completes. After memcpy_to_pmem()
* data may still reside in cpu or platform buffers, so this operation
- * must be followed by a wmb_pmem().
+ * must be followed by a blkdev_issue_flush() on the pmem block device.
*/
-static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
+static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
{
if (arch_has_pmem_api())
arch_memcpy_to_pmem(dst, src, n);
else
- default_memcpy_to_pmem(dst, src, n);
-}
-
-/**
- * wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of memcpy_to_pmem() operations this drains data from
- * cpu write buffers and any platform (memory controller) buffers to
- * ensure that written data is durable on persistent memory media.
- */
-static inline void wmb_pmem(void)
-{
- if (arch_has_wmb_pmem())
- arch_wmb_pmem();
- else
- wmb();
+ memcpy(dst, src, n);
}
/**
@@ -184,14 +109,14 @@ static inline void wmb_pmem(void)
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
-static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
if (arch_has_pmem_api())
return arch_copy_from_iter_pmem(addr, bytes, i);
- return default_copy_from_iter_pmem(addr, bytes, i);
+ return copy_from_iter_nocache(addr, bytes, i);
}
/**
@@ -200,14 +125,14 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
-static inline void clear_pmem(void __pmem *addr, size_t size)
+static inline void clear_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_clear_pmem(addr, size);
else
- default_clear_pmem(addr, size);
+ memset(addr, 0, size);
}
/**
@@ -218,7 +143,7 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
* For platforms that support clearing poison this flushes any poisoned
* ranges out of the cache
*/
-static inline void invalidate_pmem(void __pmem *addr, size_t size)
+static inline void invalidate_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_invalidate_pmem(addr, size);
@@ -230,9 +155,9 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
* @size: number of bytes to write back
*
* Write back the processor cache range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
-static inline void wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void wb_cache_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_wb_cache_pmem(addr, size);
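With wmb_pmem() gone, durability is now obtained through the block layer instead. A sketch of the new pattern, assuming 'dst' is a pmem mapping backed by the block device 'bdev' and assuming the three-argument blkdev_issue_flush() of this kernel generation:

#include <linux/pmem.h>
#include <linux/blkdev.h>

static int pmem_write_durable(void *dst, const void *src, size_t n,
			      struct block_device *bdev)
{
	memcpy_to_pmem(dst, src, n);	/* may leave data in CPU/platform buffers */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);	/* make it durable */
}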
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index c818772d9..d5d3d741f 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -79,7 +79,7 @@ posix_acl_release(struct posix_acl *acl)
extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
-extern int posix_acl_valid(const struct posix_acl *);
+extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
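posix_acl_valid() now checks the ACL's uids/gids against an explicit user namespace. A minimal sketch validating against the initial namespace; filesystem code would typically pass its superblock's user namespace instead:

#include <linux/posix_acl.h>
#include <linux/user_namespace.h>

static int check_acl(const struct posix_acl *acl)
{
	return posix_acl_valid(&init_user_ns, acl);
}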
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
index 24f51db8a..89d3f1cb3 100644
--- a/include/linux/power/max8903_charger.h
+++ b/include/linux/power/max8903_charger.h
@@ -26,8 +26,8 @@
struct max8903_pdata {
/*
* GPIOs
- * cen, chg, flt, and usus are optional.
- * dok, dcm, and uok are not optional depending on the status of
+ * cen, chg, flt, dcm and usus are optional.
+ * dok and uok are required, depending on the status of
* dc_valid and usb_valid.
*/
int cen; /* Charger Enable input */
@@ -41,7 +41,7 @@ struct max8903_pdata {
/*
* DC(Adapter/TA) is wired
* When dc_valid is true,
- * dok and dcm should be valid.
+ * dok should be valid.
*
* At least one of dc_valid or usb_valid should be true.
*/
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 751061790..396550331 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -248,6 +248,7 @@ struct power_supply {
struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
+ bool initialized;
atomic_t use_cnt;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f4da695fd..696a56be7 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -61,6 +61,11 @@ static inline void console_verbose(void)
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
+/* strlen("ratelimit") + 1 */
+#define DEVKMSG_STR_MAX_SIZE 10
+extern char devkmsg_log_str[];
+struct ctl_table;
+
struct va_format {
const char *fmt;
va_list *va;
@@ -108,11 +113,14 @@ struct va_format {
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format checking.
*/
-#define no_printk(fmt, ...) \
-do { \
- if (0) \
- printk(fmt, ##__VA_ARGS__); \
-} while (0)
+#define no_printk(fmt, ...) \
+({ \
+ do { \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
+ } while (0); \
+ 0; \
+})
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
@@ -172,6 +180,10 @@ extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
+extern int
+devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
+ size_t *lenp, loff_t *ppos);
+
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
@@ -286,10 +298,11 @@ extern asmlinkage void dump_stack(void) __cold;
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
-#include <linux/dynamic_debug.h>
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG)
+#include <linux/dynamic_debug.h>
+
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
@@ -309,20 +322,24 @@ extern asmlinkage void dump_stack(void) __cold;
#define printk_once(fmt, ...) \
({ \
static bool __print_once __read_mostly; \
+ bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
__print_once = true; \
printk(fmt, ##__VA_ARGS__); \
} \
+ unlikely(__ret_print_once); \
})
#define printk_deferred_once(fmt, ...) \
({ \
static bool __print_once __read_mostly; \
+ bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
__print_once = true; \
printk_deferred(fmt, ##__VA_ARGS__); \
} \
+ unlikely(__ret_print_once); \
})
#else
#define printk_once(fmt, ...) \
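printk_once() and printk_deferred_once() now evaluate to true exactly once, on the call that emits the message, so one-shot fallback work can hang off the first print. A sketch with a hypothetical helper:

#include <linux/printk.h>

static void foo_disable_dma(void)	/* hypothetical one-time fallback */
{
}

static void foo_handle_error(void)
{
	/* The branch is taken only on the call that actually prints. */
	if (printk_once(KERN_WARNING "foo: falling back to PIO mode\n"))
		foo_disable_dma();
}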
diff --git a/include/linux/property.h b/include/linux/property.h
index ecab11e40..856e50b21 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -77,6 +77,9 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname);
+
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
@@ -187,7 +190,7 @@ struct property_entry {
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
.is_string = false, \
- { .pointer = { _type_##_data = _val_ } }, \
+ { .pointer = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
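The added '.' turns the pasted member name into a proper designated initializer (e.g. .u8_data). Usage of the macros themselves is unchanged; a short sketch with a hypothetical property name:

#include <linux/property.h>

static const u8 tx_gains[] = { 1, 2, 3 };

static const struct property_entry board_props[] = {
	PROPERTY_ENTRY_U8_ARRAY("tx-gains", tx_gains),	/* name is a placeholder */
	{ },						/* sentinel */
};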
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 831479f8d..899e95e84 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -58,7 +58,8 @@ struct pstore_info {
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi);
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi);
int (*write)(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, int count, bool compressed,
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
new file mode 100644
index 000000000..2052011bf
--- /dev/null
+++ b/include/linux/ptr_ring.h
@@ -0,0 +1,448 @@
+/*
+ * Definitions for the 'struct ptr_ring' data structure.
+ *
+ * Author:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This is a limited-size FIFO maintaining pointers in FIFO order, with
+ * one CPU producing entries and another consuming entries from a FIFO.
+ *
+ * This implementation tries to minimize cache-contention when there is a
+ * single producer and a single consumer CPU.
+ */
+
+#ifndef _LINUX_PTR_RING_H
+#define _LINUX_PTR_RING_H 1
+
+#ifdef __KERNEL__
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/errno.h>
+#endif
+
+struct ptr_ring {
+ int producer ____cacheline_aligned_in_smp;
+ spinlock_t producer_lock;
+ int consumer ____cacheline_aligned_in_smp;
+ spinlock_t consumer_lock;
+ /* Shared consumer/producer data */
+ /* Read-only by both the producer and the consumer */
+ int size ____cacheline_aligned_in_smp; /* max entries in queue */
+ void **queue;
+};
+
+/* Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax(). If ring is ever resized, callers must hold
+ * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
+ * producer_lock, the next call to __ptr_ring_produce may fail.
+ */
+static inline bool __ptr_ring_full(struct ptr_ring *r)
+{
+ return r->queue[r->producer];
+}
+
+static inline bool ptr_ring_full(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock(&r->producer_lock);
+ ret = __ptr_ring_full(r);
+ spin_unlock(&r->producer_lock);
+
+ return ret;
+}
+
+static inline bool ptr_ring_full_irq(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock_irq(&r->producer_lock);
+ ret = __ptr_ring_full(r);
+ spin_unlock_irq(&r->producer_lock);
+
+ return ret;
+}
+
+static inline bool ptr_ring_full_any(struct ptr_ring *r)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&r->producer_lock, flags);
+ ret = __ptr_ring_full(r);
+ spin_unlock_irqrestore(&r->producer_lock, flags);
+
+ return ret;
+}
+
+static inline bool ptr_ring_full_bh(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock_bh(&r->producer_lock);
+ ret = __ptr_ring_full(r);
+ spin_unlock_bh(&r->producer_lock);
+
+ return ret;
+}
+
+/* Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax(). Callers must hold producer_lock.
+ */
+static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
+{
+ if (unlikely(!r->size) || r->queue[r->producer])
+ return -ENOSPC;
+
+ r->queue[r->producer++] = ptr;
+ if (unlikely(r->producer >= r->size))
+ r->producer = 0;
+ return 0;
+}
+
+static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
+{
+ int ret;
+
+ spin_lock(&r->producer_lock);
+ ret = __ptr_ring_produce(r, ptr);
+ spin_unlock(&r->producer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
+{
+ int ret;
+
+ spin_lock_irq(&r->producer_lock);
+ ret = __ptr_ring_produce(r, ptr);
+ spin_unlock_irq(&r->producer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&r->producer_lock, flags);
+ ret = __ptr_ring_produce(r, ptr);
+ spin_unlock_irqrestore(&r->producer_lock, flags);
+
+ return ret;
+}
+
+static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
+{
+ int ret;
+
+ spin_lock_bh(&r->producer_lock);
+ ret = __ptr_ring_produce(r, ptr);
+ spin_unlock_bh(&r->producer_lock);
+
+ return ret;
+}
+
+/* Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax(). Callers must take consumer_lock
+ * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
+ * If ring is never resized, and if the pointer is merely
+ * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
+ */
+static inline void *__ptr_ring_peek(struct ptr_ring *r)
+{
+ if (likely(r->size))
+ return r->queue[r->consumer];
+ return NULL;
+}
+
+/* Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax(). Callers must take consumer_lock
+ * if the ring is ever resized - see e.g. ptr_ring_empty.
+ */
+static inline bool __ptr_ring_empty(struct ptr_ring *r)
+{
+ return !__ptr_ring_peek(r);
+}
+
+static inline bool ptr_ring_empty(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock(&r->consumer_lock);
+ ret = __ptr_ring_empty(r);
+ spin_unlock(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock_irq(&r->consumer_lock);
+ ret = __ptr_ring_empty(r);
+ spin_unlock_irq(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline bool ptr_ring_empty_any(struct ptr_ring *r)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ ret = __ptr_ring_empty(r);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+
+ return ret;
+}
+
+static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
+{
+ bool ret;
+
+ spin_lock_bh(&r->consumer_lock);
+ ret = __ptr_ring_empty(r);
+ spin_unlock_bh(&r->consumer_lock);
+
+ return ret;
+}
+
+/* Must only be called after __ptr_ring_peek returned !NULL */
+static inline void __ptr_ring_discard_one(struct ptr_ring *r)
+{
+ r->queue[r->consumer++] = NULL;
+ if (unlikely(r->consumer >= r->size))
+ r->consumer = 0;
+}
+
+static inline void *__ptr_ring_consume(struct ptr_ring *r)
+{
+ void *ptr;
+
+ ptr = __ptr_ring_peek(r);
+ if (ptr)
+ __ptr_ring_discard_one(r);
+
+ return ptr;
+}
+
+static inline void *ptr_ring_consume(struct ptr_ring *r)
+{
+ void *ptr;
+
+ spin_lock(&r->consumer_lock);
+ ptr = __ptr_ring_consume(r);
+ spin_unlock(&r->consumer_lock);
+
+ return ptr;
+}
+
+static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
+{
+ void *ptr;
+
+ spin_lock_irq(&r->consumer_lock);
+ ptr = __ptr_ring_consume(r);
+ spin_unlock_irq(&r->consumer_lock);
+
+ return ptr;
+}
+
+static inline void *ptr_ring_consume_any(struct ptr_ring *r)
+{
+ unsigned long flags;
+ void *ptr;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ ptr = __ptr_ring_consume(r);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+
+ return ptr;
+}
+
+static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
+{
+ void *ptr;
+
+ spin_lock_bh(&r->consumer_lock);
+ ptr = __ptr_ring_consume(r);
+ spin_unlock_bh(&r->consumer_lock);
+
+ return ptr;
+}
+
+/* Cast to structure type and call a function without discarding from FIFO.
+ * Function must return a value.
+ * Callers must take consumer_lock.
+ */
+#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))
+
+#define PTR_RING_PEEK_CALL(r, f) ({ \
+ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
+ \
+ spin_lock(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
+ spin_unlock(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v; \
+})
+
+#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
+ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
+ \
+ spin_lock_irq(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
+ spin_unlock_irq(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v; \
+})
+
+#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
+ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
+ \
+ spin_lock_bh(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
+ spin_unlock_bh(&(r)->consumer_lock); \
+ __PTR_RING_PEEK_CALL_v; \
+})
+
+#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
+ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
+ unsigned long __PTR_RING_PEEK_CALL_f;\
+ \
+ spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
+ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
+ spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
+ __PTR_RING_PEEK_CALL_v; \
+})
+
+static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+{
+ return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+}
+
+static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
+{
+ r->queue = __ptr_ring_init_queue_alloc(size, gfp);
+ if (!r->queue)
+ return -ENOMEM;
+
+ r->size = size;
+ r->producer = r->consumer = 0;
+ spin_lock_init(&r->producer_lock);
+ spin_lock_init(&r->consumer_lock);
+
+ return 0;
+}
+
+static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
+ int size, gfp_t gfp,
+ void (*destroy)(void *))
+{
+ int producer = 0;
+ void **old;
+ void *ptr;
+
+ while ((ptr = ptr_ring_consume(r)))
+ if (producer < size)
+ queue[producer++] = ptr;
+ else if (destroy)
+ destroy(ptr);
+
+ r->size = size;
+ r->producer = producer;
+ r->consumer = 0;
+ old = r->queue;
+ r->queue = queue;
+
+ return old;
+}
+
+static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+ void (*destroy)(void *))
+{
+ unsigned long flags;
+ void **queue = __ptr_ring_init_queue_alloc(size, gfp);
+ void **old;
+
+ if (!queue)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&(r)->producer_lock, flags);
+
+ old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
+
+ spin_unlock_irqrestore(&(r)->producer_lock, flags);
+
+ kfree(old);
+
+ return 0;
+}
+
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+ int size,
+ gfp_t gfp, void (*destroy)(void *))
+{
+ unsigned long flags;
+ void ***queues;
+ int i;
+
+ queues = kmalloc(nrings * sizeof *queues, gfp);
+ if (!queues)
+ goto noqueues;
+
+ for (i = 0; i < nrings; ++i) {
+ queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
+ if (!queues[i])
+ goto nomem;
+ }
+
+ for (i = 0; i < nrings; ++i) {
+ spin_lock_irqsave(&(rings[i])->producer_lock, flags);
+ queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
+ size, gfp, destroy);
+ spin_unlock_irqrestore(&(rings[i])->producer_lock, flags);
+ }
+
+ for (i = 0; i < nrings; ++i)
+ kfree(queues[i]);
+
+ kfree(queues);
+
+ return 0;
+
+nomem:
+ while (--i >= 0)
+ kfree(queues[i]);
+
+ kfree(queues);
+
+noqueues:
+ return -ENOMEM;
+}
+
+static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
+{
+ void *ptr;
+
+ if (destroy)
+ while ((ptr = ptr_ring_consume(r)))
+ destroy(ptr);
+ kfree(r->queue);
+}
+
+#endif /* _LINUX_PTR_RING_H */
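A typical single-producer/single-consumer usage of the API above; the ring size and the item lifetime (kmalloc'd items) are assumptions of the sketch:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static struct ptr_ring demo_ring;

static int demo_init(void)
{
	return ptr_ring_init(&demo_ring, 256, GFP_KERNEL);	/* 256 slots */
}

/* Producer side: returns -ENOSPC while the ring is full. */
static int demo_send(void *item)
{
	return ptr_ring_produce(&demo_ring, item);
}

/* Consumer side: returns NULL while the ring is empty. */
static void *demo_recv(void)
{
	return ptr_ring_consume(&demo_ring);
}

static void demo_destroy(void *ptr)
{
	kfree(ptr);	/* items assumed to be kmalloc'd */
}

static void demo_exit(void)
{
	ptr_ring_cleanup(&demo_ring, demo_destroy);
}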
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index c038ae36b..f1bbae014 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -5,7 +5,9 @@
#include <linux/mutex.h>
#include <linux/of.h>
+struct pwm_capture;
struct seq_file;
+
struct pwm_chip;
/**
@@ -148,11 +150,100 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * @pwm: PWM device
+ * @state: state to fill with the prepared PWM state
+ *
+ * This function prepares a state that can later be tweaked and applied
+ * to the PWM device with pwm_apply_state(). This is a convenience function
+ * that first retrieves the current PWM state and then replaces the period
+ * and polarity fields with the reference values defined in pwm->args.
+ * Once the function returns, you can adjust the ->enabled and ->duty_cycle
+ * fields according to your needs before calling pwm_apply_state().
+ *
+ * ->duty_cycle is initially set to zero to avoid cases where the current
+ * ->duty_cycle value exceeds the pwm_args->period one, which would trigger
+ * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * first.
+ */
+static inline void pwm_init_state(const struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_args args;
+
+ /* First get the current state. */
+ pwm_get_state(pwm, state);
+
+ /* Then fill it with the reference config */
+ pwm_get_args(pwm, &args);
+
+ state->period = args.period;
+ state->polarity = args.polarity;
+ state->duty_cycle = 0;
+}
+
+/**
+ * pwm_get_relative_duty_cycle() - Get a relative duty cycle value
+ * @state: PWM state to extract the duty cycle from
+ * @scale: target scale of the relative duty cycle
+ *
+ * This function converts the absolute duty cycle stored in @state (expressed
+ * in nanoseconds) into a value relative to the period.
+ *
+ * For example, if you want to get the duty_cycle expressed in percent, call:
+ *
+ * pwm_get_state(pwm, &state);
+ * duty = pwm_get_relative_duty_cycle(&state, 100);
+ */
+static inline unsigned int
+pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
+{
+ if (!state->period)
+ return 0;
+
+ return DIV_ROUND_CLOSEST_ULL((u64)state->duty_cycle * scale,
+ state->period);
+}
+
+/**
+ * pwm_set_relative_duty_cycle() - Set a relative duty cycle value
+ * @state: PWM state to fill
+ * @duty_cycle: relative duty cycle value
+ * @scale: scale in which @duty_cycle is expressed
+ *
+ * This function converts a relative duty cycle into an absolute one (expressed
+ * in nanoseconds) and stores the result in state->duty_cycle.
+ *
+ * For example, if you want to configure a 50% duty cycle, call:
+ *
+ * pwm_init_state(pwm, &state);
+ * pwm_set_relative_duty_cycle(&state, 50, 100);
+ * pwm_apply_state(pwm, &state);
+ *
+ * This function returns -EINVAL if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ */
+static inline int
+pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
+ unsigned int scale)
+{
+ if (!scale || duty_cycle > scale)
+ return -EINVAL;
+
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL((u64)duty_cycle *
+ state->period,
+ scale);
+
+ return 0;
+}
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @config: configure duty cycles and period length for this PWM
* @set_polarity: configure the polarity of this PWM
+ * @capture: capture and report PWM signal
* @enable: enable PWM output toggling
* @disable: disable PWM output toggling
* @apply: atomically apply a new PWM config. The state argument
@@ -172,6 +263,8 @@ struct pwm_ops {
int duty_ns, int period_ns);
int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity);
+ int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_capture *result, unsigned long timeout);
int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -212,6 +305,16 @@ struct pwm_chip {
bool can_sleep;
};
+/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
struct pwm_device *pwm_request(int pwm_id, const char *label);
@@ -323,8 +426,9 @@ static inline void pwm_disable(struct pwm_device *pwm)
pwm_apply_state(pwm, &state);
}
-
/* PWM provider APIs */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout);
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
@@ -376,6 +480,13 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
return -EINVAL;
}
+static inline int pwm_capture(struct pwm_device *pwm,
+ struct pwm_capture *result,
+ unsigned long timeout)
+{
+ return -EINVAL;
+}
+
static inline int pwm_set_polarity(struct pwm_device *pwm,
enum pwm_polarity polarity)
{
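The new helpers compose as documented above; a sketch configuring a 50% duty cycle and capturing an input signal. pwm_apply_state() comes from the atomic PWM API, and the capture timeout is assumed here to be in jiffies:

#include <linux/pwm.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static int demo_pwm_config(struct pwm_device *pwm)
{
	struct pwm_state state;
	int ret;

	pwm_init_state(pwm, &state);	/* period/polarity from pwm->args */
	ret = pwm_set_relative_duty_cycle(&state, 50, 100);
	if (ret)
		return ret;

	state.enabled = true;
	return pwm_apply_state(pwm, &state);
}

static int demo_pwm_measure(struct pwm_device *pwm)
{
	struct pwm_capture result;
	int ret;

	ret = pwm_capture(pwm, &result, msecs_to_jiffies(100));
	if (!ret)
		pr_info("period %u ns, duty %u ns\n",
			result.period, result.duty_cycle);
	return ret;
}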
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 9e1200091..cc32ab852 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -29,6 +29,14 @@ extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
+ size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
+ phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
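The new PAS (Peripheral Authentication Service) calls follow a check/load/setup/authenticate sequence, with qcom_scm_pas_shutdown() as the teardown. A sketch of the assumed ordering; the peripheral ID, metadata, and memory region are placeholders:

#include <linux/qcom_scm.h>
#include <linux/errno.h>

static int demo_pas_boot(u32 pas_id, const void *metadata, size_t meta_size,
			 phys_addr_t mem_phys, phys_addr_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(pas_id))
		return -ENODEV;

	ret = qcom_scm_pas_init_image(pas_id, metadata, meta_size);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (ret)
		return ret;

	return qcom_scm_pas_auth_and_reset(pas_id);
}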
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 3f14c7efe..40c0ada01 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -12,10 +12,21 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define X_FINAL_CLEANUP_AGG_INT 1
+#define NUM_OF_GLOBAL_QUEUES 128
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 0
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 7
-#define FW_REVISION_VERSION 3
+#define FW_MINOR_VERSION 10
+#define FW_REVISION_VERSION 5
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -97,45 +108,86 @@
#define DQ_XCM_AGG_VAL_SEL_REG6 7
/* XCM agg val selection */
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_CONS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_TX_BD_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0 0
+#define DQ_UCM_AGG_VAL_SEL_WORD1 1
+#define DQ_UCM_AGG_VAL_SEL_WORD2 2
+#define DQ_UCM_AGG_VAL_SEL_WORD3 3
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0 0
+#define DQ_TCM_AGG_VAL_SEL_WORD1 1
+#define DQ_TCM_AGG_VAL_SEL_WORD2 2
+#define DQ_TCM_AGG_VAL_SEL_WORD3 3
+#define DQ_TCM_AGG_VAL_SEL_REG1 4
+#define DQ_TCM_AGG_VAL_SEL_REG2 5
+#define DQ_TCM_AGG_VAL_SEL_REG6 6
+#define DQ_TCM_AGG_VAL_SEL_REG9 7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD \
+ DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD \
+ DQ_TCM_AGG_VAL_SEL_WORD0
/* XCM agg counter flag selection */
-#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
-#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
-#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
-#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
-#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
-#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
-#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
-#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3 2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4 3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5 4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6 5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+#define DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE (320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT (4)
/*****************/
/* QM CONSTANTS */
@@ -282,8 +334,6 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -342,6 +392,9 @@
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
@@ -379,6 +432,38 @@ struct async_data {
u8 fw_debug_param;
};
+struct coalescing_timeset {
+ u8 value;
+#define COALESCING_TIMESET_TIMESET_MASK 0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT 0
+#define COALESCING_TIMESET_VALID_MASK 0x1
+#define COALESCING_TIMESET_VALID_SHIFT 7
+};
+
+struct common_prs_pf_msg_info {
+ __le32 value;
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
+};
+
+struct common_queue_zone {
+ __le16 ring_drv_data_consumer;
+ __le16 reserved;
+};
+
+struct eth_rx_prod_data {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+};
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -388,11 +473,23 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
+struct malicious_vf_eqe_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved[3];
+};
+
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
/* Event Data Union */
union event_ring_data {
- u8 bytes[8];
- struct vf_pf_channel_eqe_data vf_pf_channel;
- struct async_data async_info;
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct malicious_vf_eqe_data malicious_vf;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
};
/* Event Ring Entry */
@@ -420,9 +517,9 @@ enum mf_mode {
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_RESERVED1,
+ PROTOCOLID_ISCSI,
PROTOCOLID_RESERVED2,
- PROTOCOLID_RESERVED3,
+ PROTOCOLID_ROCE,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_RESERVED4,
@@ -433,6 +530,16 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+struct ustorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[3];
+};
+
+struct ustorm_queue_zone {
+ struct ustorm_eth_queue_zone eth;
+ struct common_queue_zone common;
+};
+
/* status block structure */
struct cau_pi_entry {
u32 prod;
@@ -588,7 +695,10 @@ struct parsing_and_err_flags {
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
-/* Concrete Function ID. */
+struct pb_context {
+ __le32 crc[4];
+};
+
struct pxp_concrete_fid {
__le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF
@@ -655,6 +765,72 @@ struct pxp_ptt_entry {
};
/* RSS hash type */
+struct rdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+ u8 partial_dif_data[7];
+ __le16 partial_crc_value;
+ __le16 partial_checksum_value;
+ __le32 offset_in_io;
+ __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+ __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+ __le32 reserved2;
+};
+
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
@@ -683,19 +859,122 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
-struct tunnel_parsing_flags {
- u8 flags;
-#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+struct tdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ __le16 partial_crc_valueB;
+ __le16 partial_checksum_valueB;
+ __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+ u8 reserved1;
+ u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+ __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_iob;
+ __le16 partial_crc_value_a;
+ __le16 partial_checksum_valuea_;
+ __le32 offset_in_ioa;
+ u8 partial_dif_data_a[8];
+ u8 partial_dif_data_b[8];
+};
+
+struct timers_context {
+ __le32 logical_client0;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+ __le32 logical_client1;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+ __le32 logical_client2;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+ __le32 host_expiration_fields;
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
};
#endif /* __COMMON_HSI__ */
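The MASK/SHIFT pairs throughout this header describe sub-fields packed into small words. A sketch of how firmware-facing code might pack and unpack the coalescing timeset using the definitions above:

#include <linux/types.h>

static inline u8 coalescing_timeset_pack(u8 timeset, bool valid)
{
	u8 val;

	val = (timeset & COALESCING_TIMESET_TIMESET_MASK) <<
	      COALESCING_TIMESET_TIMESET_SHIFT;
	val |= ((u8)valid & COALESCING_TIMESET_VALID_MASK) <<
	       COALESCING_TIMESET_VALID_SHIFT;
	return val;
}

static inline u8 coalescing_timeset_unpack(u8 val)
{
	return (val >> COALESCING_TIMESET_TIMESET_SHIFT) &
	       COALESCING_TIMESET_TIMESET_MASK;
}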
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 092cb0c1a..b5ebc697d 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -12,6 +12,8 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
+#define ETH_HSI_VER_MAJOR 3
+#define ETH_HSI_VER_MINOR 0
#define ETH_CACHE_LINE_SIZE 64
#define ETH_MAX_RAMROD_PER_CON 8
@@ -57,19 +59,6 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
-/* Queue Zone sizes */
-#define TSTORM_QZONE_SIZE 0
-#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
-#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
-#define XSTORM_QZONE_SIZE 0
-#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
-#define PSTORM_QZONE_SIZE 0
-
-/* Interrupt coalescing TimeSet */
-struct coalescing_timeset {
- u8 timeset;
- u8 valid;
-};
struct eth_tx_1st_bd_flags {
u8 bitfields;
@@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd {
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
/* The parsing information data for the second tx bd of a given packet. */
@@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
+struct eth_fast_path_cqe_fw_debug {
+ u8 reserved0;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* tunneling parsing flags */
+struct eth_tunnel_parsing_flags {
+ u8 flags;
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
/* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe {
- u8 type;
- u8 bitfields;
+ u8 type;
+ u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
- __le16 pkt_len;
- struct parsing_and_err_flags pars_flags;
- __le16 vlan_tag;
- __le32 rss_hash;
- __le16 len_on_first_bd;
- u8 placement_offset;
- struct tunnel_parsing_flags tunnel_pars_flags;
- u8 bd_num;
- u8 reserved[7];
- u32 fw_debug;
- u8 reserved1[3];
- u8 flags;
+ __le16 pkt_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num;
+ u8 reserved[7];
+ struct eth_fast_path_cqe_fw_debug fw_debug;
+ u8 reserved1[3];
+ u8 flags;
#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
@@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe {
__le32 rss_hash;
__le16 len_on_first_bd;
u8 placement_offset;
- struct tunnel_parsing_flags tunnel_pars_flags;
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- u32 fw_debug;
+ struct eth_fast_path_cqe_fw_debug fw_debug;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -264,12 +276,25 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
-/* ETH Rx producers data */
-struct eth_rx_prod_data {
- __le16 bd_prod;
- __le16 cqe_prod;
- __le16 reserved;
- __le16 reserved1;
+enum eth_rx_tunn_type {
+ ETH_RX_NO_TUNN,
+ ETH_RX_TUNN_GENEVE,
+ ETH_RX_TUNN_GRE,
+ ETH_RX_TUNN_VXLAN,
+ MAX_ETH_RX_TUNN_TYPE
+};
+
+/* Aggregation end reason. */
+enum eth_tpa_end_reason {
+ ETH_AGG_END_UNUSED,
+ ETH_AGG_END_SP_UPDATE,
+ ETH_AGG_END_MAX_LEN,
+ ETH_AGG_END_LAST_SEG,
+ ETH_AGG_END_TIMEOUT,
+ ETH_AGG_END_NOT_CONSISTENT,
+ ETH_AGG_END_OUT_OF_ORDER,
+ ETH_AGG_END_NON_TPA_SEG,
+ MAX_ETH_TPA_END_REASON
};
/* The first tx bd of a given packet */
@@ -337,21 +362,18 @@ union eth_tx_bd_types {
};
/* Mstorm Queue Zone */
-struct mstorm_eth_queue_zone {
- struct eth_rx_prod_data rx_producers;
- __le32 reserved[2];
-};
-
-/* Ustorm Queue Zone */
-struct ustorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset;
- __le16 reserved[3];
+enum eth_tx_tunn_type {
+ ETH_TX_TUNN_GENEVE,
+ ETH_TX_TUNN_TTAG,
+ ETH_TX_TUNN_GRE,
+ ETH_TX_TUNN_VXLAN,
+ MAX_ETH_TX_TUNN_TYPE
};
/* Ystorm Queue Zone */
-struct ystorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset;
- __le16 reserved[3];
+struct xstorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[7];
};
/* ETH doorbell data */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
new file mode 100644
index 000000000..b3c0feb15
--- /dev/null
+++ b/include/linux/qed/iscsi_common.h
@@ -0,0 +1,1439 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ISCSI_COMMON__
+#define __ISCSI_COMMON__
+/**********************/
+/* ISCSI FW CONSTANTS */
+/**********************/
+
+/* iSCSI HSI constants */
+#define ISCSI_DEFAULT_MTU (1500)
+
+/* Current iSCSI HSI version number composed of two fields (16 bit) */
+#define ISCSI_HSI_MAJOR_VERSION (0)
+#define ISCSI_HSI_MINOR_VERSION (0)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_SLOW_PATH_LAYER_CODE (6)
+
+/* CQE completion status */
+#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
+#define ISCSI_EQE_RST_CONN_RCVD (0x1)
+
+/* iSCSI parameter defaults */
+#define ISCSI_DEFAULT_HEADER_DIGEST (0)
+#define ISCSI_DEFAULT_DATA_DIGEST (0)
+#define ISCSI_DEFAULT_INITIAL_R2T (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+
+/* iSCSI parameter limits */
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+
+/* iSCSI reserved params */
+#define ISCSI_ITT_ALL_ONES (0xffffffff)
+#define ISCSI_TTT_ALL_ONES (0xffffffff)
+
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0)
+#define ISCSI_OPCODE_NOP_OUT ( \
+ ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
+#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1)
+#define ISCSI_OPCODE_SCSI_CMD ( \
+ ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2)
+#define ISCSI_OPCODE_TMF_REQUEST ( \
+ ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3)
+#define ISCSI_OPCODE_LOGIN_REQUEST ( \
+ ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4)
+#define ISCSI_OPCODE_TEXT_REQUEST ( \
+ ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_DATA_OUT (5)
+#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6)
+#define ISCSI_OPCODE_LOGOUT_REQUEST ( \
+ ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_NOP_IN (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
+#define ISCSI_OPCODE_DATA_IN (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
+#define ISCSI_OPCODE_R2T (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG (0x32)
+#define ISCSI_OPCODE_REJECT (0x3f)
+
+/* iSCSI stages */
+#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
+
+/* iSCSI CQE errors */
+#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+
+struct cqe_error_bitmap {
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
+};
+
+union cqe_error_status {
+ u8 error_status;
+ struct cqe_error_bitmap error_bits;
+};
+
+struct data_hdr {
+ __le32 data[12];
+};
+
+struct iscsi_async_msg_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 all_ones;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 param1_rsrv;
+ u8 async_vcode;
+ u8 async_event;
+ __le16 param3_rsrv;
+ __le16 param2_rsrv;
+ __le32 reserved7;
+};
+
+struct iscsi_sge {
+ struct regpair sge_addr;
+ __le16 sge_len;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct iscsi_cached_sge_ctx {
+ struct iscsi_sge sge;
+ struct regpair reserved;
+ __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 cdb[4];
+};
+
+struct iscsi_common_hdr {
+ u8 hdr_status;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 hdr_first_byte;
+#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
+#define ISCSI_COMMON_HDR_IMM_MASK 0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT 6
+#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
+ __le32 hdr_second_dword;
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 lun_reserved[4];
+ __le32 data[6];
+};
+
+struct iscsi_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le32 initial_ack;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
+ u8 pbl_page_size_log;
+ u8 pbe_page_size_log;
+ u8 default_cq;
+ __le32 stat_sn;
+};
+
+struct iscsi_slow_path_hdr {
+ u8 op_code;
+ u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
+};
+
+struct iscsi_conn_update_ramrod_params {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
+ u8 reserved0[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 exp_stat_sn;
+};
+
+struct iscsi_ext_cdb_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ struct iscsi_sge cdb_sge;
+};
+
+struct iscsi_login_req_hdr {
+ u8 version_min;
+ u8 version_max;
+ u8 flags_attr;
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 isid_TABC;
+ __le16 tsih;
+ __le16 isid_d;
+ __le32 itt;
+ __le16 reserved1;
+ __le16 cid;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved2[4];
+};
+
+struct iscsi_logout_req_hdr {
+ __le16 reserved0;
+ u8 reason_code;
+ u8 opcode;
+ __le32 reserved1;
+ __le32 reserved2[2];
+ __le32 itt;
+ __le16 reserved3;
+ __le16 cid;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved4[4];
+};
+
+struct iscsi_data_out_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 reserved3;
+ __le32 exp_stat_sn;
+ __le32 reserved4;
+ __le32 data_sn;
+ __le32 buffer_offset;
+ __le32 reserved5;
+};
+
+struct iscsi_data_in_hdr {
+ u8 status_rsvd;
+ u8 reserved1;
+ u8 flags;
+#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
+#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data_sn;
+ __le32 buffer_offset;
+ __le32 residual_count;
+};
+
+struct iscsi_r2t_hdr {
+ u8 reserved0[3];
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 r2t_sn;
+ __le32 buffer_offset;
+ __le32 desired_data_trns_len;
+};
+
+struct iscsi_nop_out_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 reserved5;
+ __le32 reserved6;
+};
+
+struct iscsi_nop_in_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+};
+
+struct iscsi_login_response_hdr {
+ u8 version_active;
+ u8 version_max;
+ u8 flags_attr;
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 isid_TABC;
+ __le16 tsih;
+ __le16 isid_d;
+ __le32 itt;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 reserved2;
+ u8 status_detail;
+ u8 status_class;
+ __le32 reserved4[2];
+};
+
+struct iscsi_logout_response_hdr {
+ u8 reserved1;
+ u8 response;
+ u8 flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 reserved2[2];
+ __le32 itt;
+ __le32 reserved3;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4;
+ __le16 time2retain;
+ __le16 time2wait;
+ __le32 reserved5[1];
+};
+
+struct iscsi_text_request_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved4[4];
+};
+
+struct iscsi_text_response_hdr {
+ __le16 reserved1;
+ u8 flags;
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4[3];
+};
+
+struct iscsi_tmf_request_hdr {
+ __le16 reserved0;
+ u8 function;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 rtt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 ref_cmd_sn;
+ __le32 exp_data_sn;
+ __le32 reserved4[2];
+};
+
+struct iscsi_tmf_response_hdr {
+ u8 reserved2;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair reserved0;
+ __le32 itt;
+ __le32 rtt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4[3];
+};
+
+struct iscsi_response_hdr {
+ u8 hdr_status;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 snack_tag;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 exp_data_sn;
+ __le32 bi_residual_count;
+ __le32 residual_count;
+};
+
+struct iscsi_reject_hdr {
+ u8 reserved4;
+ u8 hdr_reason;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data_sn;
+ __le32 reserved3[2];
+};
+
+union iscsi_task_hdr {
+ struct iscsi_common_hdr common;
+ struct data_hdr data;
+ struct iscsi_cmd_hdr cmd;
+ struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd;
+ struct iscsi_login_req_hdr login_req;
+ struct iscsi_logout_req_hdr logout_req;
+ struct iscsi_data_out_hdr data_out;
+ struct iscsi_data_in_hdr data_in;
+ struct iscsi_r2t_hdr r2t;
+ struct iscsi_nop_out_hdr nop_out;
+ struct iscsi_nop_in_hdr nop_in;
+ struct iscsi_login_response_hdr login_response;
+ struct iscsi_logout_response_hdr logout_response;
+ struct iscsi_text_request_hdr text_request;
+ struct iscsi_text_response_hdr text_response;
+ struct iscsi_tmf_request_hdr tmf_request;
+ struct iscsi_tmf_response_hdr tmf_response;
+ struct iscsi_response_hdr response;
+ struct iscsi_reject_hdr reject;
+ struct iscsi_async_msg_hdr async_msg;
+};
+
+struct iscsi_cqe_common {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le32 reserved[3];
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_solicited {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ __le32 reserved1[2];
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_unsolicited {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le16 reserved0;
+ u8 reserved1;
+ u8 unsol_cqe_type;
+ struct regpair rqe_opaque;
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+union iscsi_cqe {
+ struct iscsi_cqe_common cqe_common;
+ struct iscsi_cqe_solicited cqe_solicited;
+ struct iscsi_cqe_unsolicited cqe_unsolicited;
+};
+
+enum iscsi_cqes_type {
+ ISCSI_CQE_TYPE_SOLICITED = 1,
+ ISCSI_CQE_TYPE_UNSOLICITED,
+ ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
+ ISCSI_CQE_TYPE_TASK_CLEANUP,
+ ISCSI_CQE_TYPE_DUMMY,
+ MAX_ISCSI_CQES_TYPE
+};
+
+enum iscsi_cqe_unsolicited_type {
+ ISCSI_CQE_UNSOLICITED_NONE,
+ ISCSI_CQE_UNSOLICITED_SINGLE,
+ ISCSI_CQE_UNSOLICITED_FIRST,
+ ISCSI_CQE_UNSOLICITED_MIDDLE,
+ ISCSI_CQE_UNSOLICITED_LAST,
+ MAX_ISCSI_CQE_UNSOLICITED_TYPE
+};
+
+struct iscsi_virt_sgl_ctx {
+ struct regpair sgl_base;
+ struct regpair dsgl_base;
+ __le32 sgl_initial_offset;
+ __le32 dsgl_initial_offset;
+ __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_sgl_var_params {
+ u8 sgl_ptr;
+ u8 dsgl_ptr;
+ __le16 sge_offset;
+ __le16 dsge_offset;
+};
+
+struct iscsi_phys_sgl_ctx {
+ struct regpair sgl_base;
+ struct regpair dsgl_base;
+ u8 sgl_size;
+ u8 dsgl_size;
+ __le16 reserved;
+ struct iscsi_sgl_var_params var_params[2];
+};
+
+union iscsi_data_desc_ctx {
+ struct iscsi_virt_sgl_ctx virt_sgl;
+ struct iscsi_phys_sgl_ctx phys_sgl;
+ struct iscsi_cached_sge_ctx cached_sge;
+};
+
+struct iscsi_debug_modes {
+ u8 flags;
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
+#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
+#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
+};
+
+struct iscsi_dif_flags {
+ u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
+};
+
+enum iscsi_eqe_opcode {
+ ISCSI_EVENT_TYPE_INIT_FUNC = 0,
+ ISCSI_EVENT_TYPE_DESTROY_FUNC,
+ ISCSI_EVENT_TYPE_OFFLOAD_CONN,
+ ISCSI_EVENT_TYPE_UPDATE_CONN,
+ ISCSI_EVENT_TYPE_CLEAR_SQ,
+ ISCSI_EVENT_TYPE_TERMINATE_CONN,
+ ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
+ ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
+ RESERVED8,
+ RESERVED9,
+ ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
+ ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
+ ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
+ ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
+ ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
+ ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
+ ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
+ ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
+ MAX_ISCSI_EQE_OPCODE
+};
+
+enum iscsi_error_types {
+ ISCSI_STATUS_NONE = 0,
+ ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
+ ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ ISCSI_CONN_ERROR_DATA_OVERRUN,
+ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
+ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
+ ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+ ISCSI_ERROR_UNKNOWN,
+ MAX_ISCSI_ERROR_TYPES
+};
+
+struct iscsi_mflags {
+ u8 mflags;
+#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
+#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
+#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
+#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
+#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
+#define ISCSI_MFLAGS_RESERVED_SHIFT 2
+};
+
+struct iscsi_sgl {
+ struct regpair sgl_addr;
+ __le16 updated_sge_size;
+ __le16 updated_sge_offset;
+ __le32 byte_offset;
+};
+
+union iscsi_mstorm_sgl {
+ struct iscsi_sgl sgl_struct;
+ struct iscsi_sge single_sge;
+};
+
+enum iscsi_ramrod_cmd_id {
+ ISCSI_RAMROD_CMD_ID_UNUSED = 0,
+ ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
+ ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_ISCSI_RAMROD_CMD_ID
+};
+
+struct iscsi_reg1 {
+ __le32 reg1_map;
+#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 3
+};
+
+union iscsi_seq_num {
+ __le16 data_sn;
+ __le16 r2t_sn;
+};
+
+struct iscsi_spe_conn_offload {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct iscsi_conn_offload_params iscsi;
+ struct tcp_offload_params tcp;
+};
+
+struct iscsi_spe_conn_offload_option2 {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct iscsi_conn_offload_params iscsi;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+struct iscsi_spe_conn_termination {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 abortive;
+ u8 reserved0[7];
+ struct regpair queue_cnts_addr;
+ struct regpair query_params_addr;
+};
+
+struct iscsi_spe_func_dstry {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct iscsi_spe_func_init {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 ooo_enable;
+ struct iscsi_debug_modes debug_mode;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le32 reserved4;
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
+
+struct ystorm_iscsi_task_state {
+ union iscsi_data_desc_ctx sgl_ctx_union;
+ __le32 buffer_offset[2];
+ __le16 bytes_nxt_dif;
+ __le16 rxmit_bytes_nxt_dif;
+ union iscsi_seq_num seq_num_union;
+ u8 dif_bytes_leftover;
+ u8 rxmit_dif_bytes_leftover;
+ __le16 reuse_count;
+ struct iscsi_dif_flags dif_flags;
+ u8 local_comp;
+ __le32 exp_r2t_sn;
+ __le32 sgl_offset[2];
+};
+
+struct ystorm_iscsi_task_st_ctx {
+ struct ystorm_iscsi_task_state state;
+ union iscsi_task_hdr pdu_hdr;
+};
+
+struct ystorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 TTT;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct mstorm_iscsi_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct ustorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ u8 next_tid_valid;
+ u8 byte3;
+ __le16 word1;
+ __le16 next_tid;
+ __le16 word3;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct mstorm_iscsi_task_st_ctx {
+ union iscsi_mstorm_sgl sgl_union;
+ struct iscsi_dif_flags dif_flags;
+ struct iscsi_mflags flags;
+ u8 sgl_size;
+ u8 host_sge_index;
+ __le16 dix_cur_sge_offset;
+ __le16 dix_cur_sge_size;
+ __le32 data_offset_rtid;
+ u8 dif_offset;
+ u8 dix_sgl_size;
+ u8 dix_sge_index;
+ u8 task_type;
+ struct regpair sense_db;
+ struct regpair dix_sgl_cur_sge;
+ __le32 rem_task_size;
+ __le16 reuse_count;
+ __le16 dif_data_residue;
+ u8 reserved0[4];
+ __le32 reserved1[1];
+};
+
+struct ustorm_iscsi_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair lun;
+ struct iscsi_reg1 reg1;
+ u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
+ u8 reserved2;
+ __le16 reserved3;
+ __le32 reserved4;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
+ u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
+ u8 cq_rss_number;
+};
+
+struct iscsi_task_context {
+ struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct tdif_task_context tdif_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+ struct rdif_task_context rdif_context;
+};
+
+enum iscsi_task_type {
+ ISCSI_TASK_TYPE_INITIATOR_WRITE,
+ ISCSI_TASK_TYPE_INITIATOR_READ,
+ ISCSI_TASK_TYPE_MIDPATH,
+ ISCSI_TASK_TYPE_UNSOLIC,
+ ISCSI_TASK_TYPE_EXCHCLEANUP,
+ ISCSI_TASK_TYPE_IRRELEVANT,
+ ISCSI_TASK_TYPE_TARGET_WRITE,
+ ISCSI_TASK_TYPE_TARGET_READ,
+ ISCSI_TASK_TYPE_TARGET_RESPONSE,
+ ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+ MAX_ISCSI_TASK_TYPE
+};
+
+union iscsi_ttt_txlen_union {
+ __le32 desired_tx_len;
+ __le32 ttt;
+};
+
+struct iscsi_uhqe {
+ __le32 reg1;
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
+ __le32 reg2;
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
+};
+
+struct iscsi_wqe_field {
+ __le32 contlen_cdbsize_field;
+#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
+};
+
+union iscsi_wqe_field_union {
+ struct iscsi_wqe_field cont_field;
+ __le32 prev_tid;
+};
+
+struct iscsi_wqe {
+ __le16 task_id;
+ u8 flags;
+#define ISCSI_WQE_WQE_TYPE_MASK 0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT 0
+#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
+#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
+#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_RESPONSE_MASK 0x1
+#define ISCSI_WQE_RESPONSE_SHIFT 7
+ struct iscsi_dif_flags prot_flags;
+ union iscsi_wqe_field_union cont_prevtid_union;
+};
+
+enum iscsi_wqe_type {
+ ISCSI_WQE_TYPE_NORMAL,
+ ISCSI_WQE_TYPE_TASK_CLEANUP,
+ ISCSI_WQE_TYPE_MIDDLE_PATH,
+ ISCSI_WQE_TYPE_LOGIN,
+ ISCSI_WQE_TYPE_FIRST_R2T_CONT,
+ ISCSI_WQE_TYPE_NONFIRST_R2T_CONT,
+ ISCSI_WQE_TYPE_RESPONSE,
+ MAX_ISCSI_WQE_TYPE
+};
+
+struct iscsi_xhqe {
+ union iscsi_ttt_txlen_union ttt_or_txlen;
+ __le32 exp_stat_sn;
+ struct iscsi_dif_flags prot_flags;
+ u8 total_ahs_length;
+ u8 opcode;
+ u8 flags;
+#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 3
+#define ISCSI_XHQE_SUPER_IO_MASK 0x1
+#define ISCSI_XHQE_SUPER_IO_SHIFT 4
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
+#define ISCSI_XHQE_RESERVED_MASK 0x3
+#define ISCSI_XHQE_RESERVED_SHIFT 6
+ union iscsi_seq_num seq_num_union;
+ __le16 reserved1;
+};
+
+struct mstorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+};
+
+struct ooo_opaque {
+ __le32 cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+};
+
+struct pstorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_bytes_cnt;
+ struct regpair iscsi_tx_packet_cnt;
+};
+
+struct tstorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_bytes_cnt;
+ struct regpair iscsi_rx_packet_cnt;
+ struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+ __le32 iscsi_cmdq_threshold_cnt;
+ __le32 iscsi_rq_threshold_cnt;
+ __le32 iscsi_immq_threshold_cnt;
+};
+
+struct ustorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_data_pdu_cnt;
+ struct regpair iscsi_rx_r2t_pdu_cnt;
+ struct regpair iscsi_rx_total_pdu_cnt;
+};
+
+struct xstorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_go_to_slow_start_event_cnt;
+ struct regpair iscsi_tx_fast_retransmit_event_cnt;
+};
+
+struct ystorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_data_pdu_cnt;
+ struct regpair iscsi_tx_r2t_pdu_cnt;
+ struct regpair iscsi_tx_total_pdu_cnt;
+};
+
+struct iscsi_db_data {
+ u8 params;
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
+
+struct tstorm_iscsi_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+#endif /* __ISCSI_COMMON__ */
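The request opcodes above fold the iSCSI immediate-delivery (I) bit into the define itself: each *_NO_IMM value is the bare 6-bit opcode, and OR-ing 0x40 sets bit 6, which is exactly the ISCSI_COMMON_HDR_IMM field of hdr_first_byte. A sketch of decoding that byte with the common-header masks (the helper name is illustrative, not part of the header):

static void iscsi_decode_first_byte(u8 first_byte, u8 *opcode, bool *imm)
{
	*opcode = (first_byte >> ISCSI_COMMON_HDR_OPCODE_SHIFT) &
		  ISCSI_COMMON_HDR_OPCODE_MASK;
	*imm = (first_byte >> ISCSI_COMMON_HDR_IMM_SHIFT) &
	       ISCSI_COMMON_HDR_IMM_MASK;
}

For example, ISCSI_OPCODE_SCSI_CMD (0x41) decodes to opcode 0x1 with the immediate bit set.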
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 5f8fcaaa6..7e441bdea 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -25,10 +25,9 @@
} while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
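/* Sketch (not part of the patch): HILO_64_REGPAIR rebuilds a 64-bit CPU
 * value from a firmware regpair of two little-endian 32-bit halves, and
 * HILO_DMA_REGPAIR now casts that u64 down to dma_addr_t, presumably to
 * avoid the old HILO_DMA's 32-bit shift on configurations where
 * dma_addr_t is only 32 bits wide. Typical use, assuming a struct
 * regpair with .hi/.lo __le32 members as elsewhere in these headers:
 *
 *	static dma_addr_t chain_next_phys(const struct qed_chain_next *p)
 *	{
 *		return HILO_DMA_REGPAIR(p->next_phys);
 *	}
 */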
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
@@ -47,16 +46,56 @@ enum qed_chain_use_mode {
QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
+enum qed_chain_cnt_type {
+ /* The chain's size/prod/cons are kept in 16-bit variables */
+ QED_CHAIN_CNT_TYPE_U16,
+
+ /* The chain's size/prod/cons are kept in 32-bit variables */
+ QED_CHAIN_CNT_TYPE_U32,
+};
+
struct qed_chain_next {
struct regpair next_phys;
void *next_virt;
};
+struct qed_chain_pbl_u16 {
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct qed_chain_pbl_u32 {
+ u32 prod_page_idx;
+ u32 cons_page_idx;
+};
+
struct qed_chain_pbl {
+ /* Base address of a pre-allocated buffer for pbl */
dma_addr_t p_phys_table;
void *p_virt_table;
- u16 prod_page_idx;
- u16 cons_page_idx;
+
+ /* Table for keeping the virtual addresses of the chain pages,
+ * corresponding to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ /* Index of the page currently used by the producer/consumer */
+ union {
+ struct qed_chain_pbl_u16 pbl16;
+ struct qed_chain_pbl_u32 pbl32;
+ } u;
+};
+
+struct qed_chain_u16 {
+ /* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct qed_chain_u32 {
+ /* Cyclic index of next element to produce/consme */
+ u32 prod_idx;
+ u32 cons_idx;
};
struct qed_chain {
@@ -64,13 +103,25 @@ struct qed_chain {
dma_addr_t p_phys_addr;
void *p_prod_elem;
void *p_cons_elem;
- u16 page_cnt;
+
enum qed_chain_mode mode;
enum qed_chain_use_mode intended_use; /* used to produce/consume */
- u16 capacity; /*< number of _usable_ elements */
- u16 size; /* number of elements */
- u16 prod_idx;
- u16 cons_idx;
+ enum qed_chain_cnt_type cnt_type;
+
+ union {
+ struct qed_chain_u16 chain16;
+ struct qed_chain_u32 chain32;
+ } u;
+
+ u32 page_cnt;
+
+ /* Number of elements - capacity counts usable elements only,
+ * while size is the total number of elements in the entire chain.
+ */
+ u32 capacity;
+ u32 size;
+
+ /* Element layout information, cached for fast index calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_unusable;
@@ -96,66 +147,69 @@ struct qed_chain {
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+
/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
- return p_chain->prod_idx;
+ return p_chain->u.chain16.prod_idx;
}
static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
- return p_chain->cons_idx;
+ return p_chain->u.chain16.cons_idx;
+}
+
+static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+{
+ return p_chain->u.chain32.cons_idx;
}
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
u16 used;
- /* we don't need to trancate upon assignmet, as we assign u32->u16 */
- used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
- (u32)p_chain->cons_idx;
+ used = (u16) (((u32)0x10000 +
+ (u32)p_chain->u.chain16.prod_idx) -
+ (u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->prod_idx / p_chain->elem_per_page -
- p_chain->cons_idx / p_chain->elem_per_page;
+ used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
- return p_chain->capacity - used;
+ return (u16)(p_chain->capacity - used);
}
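/* Worked example of the wrap-safe arithmetic above (illustration only,
 * not part of the patch): with prod_idx wrapped around to 0x0002 while
 * cons_idx is still at 0xFFF0,
 *
 *	used = (0x10000 + 0x0002) - 0xFFF0 = 0x12 = 18 in-flight elements
 *
 * so the bias constant keeps the count correct across the 16-bit wrap;
 * the u32 variant below does the same with 2^32.
 */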
-static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
- return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
-}
+ u32 used;
-static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
-{
- return qed_chain_get_elem_left(p_chain) == 0;
-}
+ used = (u32) (((u64)0x100000000ULL +
+ (u64)p_chain->u.chain32.prod_idx) -
+ (u64)p_chain->u.chain32.cons_idx);
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
-static inline u16 qed_chain_get_elem_per_page(
- struct qed_chain *p_chain)
-{
- return p_chain->elem_per_page;
+ return p_chain->capacity - used;
}
-static inline u16 qed_chain_get_usable_per_page(
- struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
return p_chain->usable_per_page;
}
-static inline u16 qed_chain_get_unusable_per_page(
- struct qed_chain *p_chain)
+static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
return p_chain->elem_unusable;
}
-static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
- return p_chain->size;
+ return p_chain->page_cnt;
}
-static inline dma_addr_t
-qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
return p_chain->pbl.p_phys_table;
}
@@ -172,65 +226,63 @@ qed_chain_get_pbl_phys(struct qed_chain *p_chain)
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
- void **p_next_elem,
- u16 *idx_to_inc,
- u16 *page_to_inc)
+ void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
+ struct qed_chain_next *p_next = NULL;
+ u32 page_index = 0;
switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
- {
- struct qed_chain_next *p_next = *p_next_elem;
+ p_next = *p_next_elem;
*p_next_elem = p_next->next_virt;
- *idx_to_inc += p_chain->elem_unusable;
+ if (is_chain_u16(p_chain))
+ *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ else
+ *(u32 *)idx_to_inc += p_chain->elem_unusable;
break;
- }
case QED_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
break;
case QED_CHAIN_MODE_PBL:
- /* It is assumed pages are sequential, next element needs
- * to change only when passing going back to first from last.
- */
- if (++(*page_to_inc) == p_chain->page_cnt) {
- *page_to_inc = 0;
- *p_next_elem = p_chain->p_virt_addr;
+ if (is_chain_u16(p_chain)) {
+ if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+ *(u16 *)page_to_inc = 0;
+ page_index = *(u16 *)page_to_inc;
+ } else {
+ if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
}
+ *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
}
}
#define is_unusable_idx(p, idx) \
- (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+ (p)->usable_per_page)
-#define is_unusable_next_idx(p, idx) \
- ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+#define is_unusable_next_idx_u32(p, idx) \
+ ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
+ (p)->usable_per_page)
-#define test_ans_skip(p, idx) \
+#define test_and_skip(p, idx) \
do { \
- if (is_unusable_idx(p, idx)) { \
- (p)->idx += (p)->elem_unusable; \
+ if (is_chain_u16(p)) { \
+ if (is_unusable_idx(p, idx)) \
+ (p)->u.chain16.idx += (p)->elem_unusable; \
+ } else { \
+ if (is_unusable_idx_u32(p, idx)) \
+ (p)->u.chain32.idx += (p)->elem_unusable; \
} \
} while (0)
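/* Illustration (not part of the patch): in NEXT_PTR mode the last
 * elem_unusable slots of each page hold the next-page pointer. With,
 * say, 256 elements per page and one unusable element, an index whose
 * in-page offset reaches usable_per_page (255) is bumped past the
 * pointer slot by test_and_skip(), landing on offset 0 of the next
 * page.
 */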
/**
- * @brief qed_chain_return_multi_produced -
- *
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
- *
- * @param p_chain
- * @param num
- */
-static inline void
-qed_chain_return_multi_produced(struct qed_chain *p_chain,
- u16 num)
-{
- p_chain->cons_idx += num;
- test_ans_skip(p_chain, cons_idx);
-}
-
-/**
* @brief qed_chain_return_produced -
*
* A chain in which the driver "Produces" elements should use this API
@@ -240,8 +292,11 @@ qed_chain_return_multi_produced(struct qed_chain *p_chain,
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
- p_chain->cons_idx++;
- test_ans_skip(p_chain, cons_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx++;
+ else
+ p_chain->u.chain32.cons_idx++;
+ test_and_skip(p_chain, cons_idx);
}
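
A minimal usage sketch (editor's illustration, not part of this patch; error
handling omitted): the producer claims an element with qed_chain_produce(),
and once it has been processed the consumer credits it back:

	u32 *p_elem;

	/* producer side: claim the next usable element and fill it */
	p_elem = (u32 *)qed_chain_produce(p_chain);
	*p_elem = data;

	/* consumer side: report one previously produced element consumed */
	qed_chain_return_produced(p_chain);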
/**
@@ -257,21 +312,33 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
- p_chain->next_page_mask) {
- qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
- &p_chain->prod_idx,
- &p_chain->pbl.prod_page_idx);
+ void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain16.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain16.prod_idx++;
+ } else {
+ if ((p_chain->u.chain32.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain32.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain32.prod_idx++;
}
- ret = p_chain->p_prod_elem;
- p_chain->prod_idx++;
+ p_ret = p_chain->p_prod_elem;
p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
p_chain->elem_size);
- return ret;
+ return p_ret;
}
/**
@@ -282,9 +349,9 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
* @param p_chain
* @param num
*
- * @return u16, number of unusable BDs
+ * @return u32, the chain's capacity (number of usable elements)
*/
-static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
return p_chain->capacity;
}
@@ -297,11 +364,13 @@ static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
*
* @param p_chain
*/
-static inline void
-qed_chain_recycle_consumed(struct qed_chain *p_chain)
+static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
- test_ans_skip(p_chain, prod_idx);
- p_chain->prod_idx++;
+ test_and_skip(p_chain, prod_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx++;
+ else
+ p_chain->u.chain32.prod_idx++;
}
/**
@@ -316,21 +385,33 @@ qed_chain_recycle_consumed(struct qed_chain *p_chain)
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
- p_chain->next_page_mask) {
+ void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain16.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain16.cons_idx++;
+ } else {
+ if ((p_chain->u.chain32.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain32.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
- &p_chain->cons_idx,
- &p_chain->pbl.cons_page_idx);
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain32.cons_idx++;
}
- ret = p_chain->p_cons_elem;
- p_chain->cons_idx++;
+ p_ret = p_chain->p_cons_elem;
p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
p_chain->elem_size);
- return ret;
+ return p_ret;
}
/**
@@ -340,16 +421,33 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
- int i;
-
- p_chain->prod_idx = 0;
- p_chain->cons_idx = 0;
- p_chain->p_cons_elem = p_chain->p_virt_addr;
- p_chain->p_prod_elem = p_chain->p_virt_addr;
+ u32 i;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->u.chain16.prod_idx = 0;
+ p_chain->u.chain16.cons_idx = 0;
+ } else {
+ p_chain->u.chain32.prod_idx = 0;
+ p_chain->u.chain32.cons_idx = 0;
+ }
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
- p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
- p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+ /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ * indices, to avoid unnecessary page advancing on the first
+ * call to qed_chain_produce/consume. Instead, the indices
+ * will be advanced to page_cnt and then will be wrapped to 0.
+ */
+ u32 reset_val = p_chain->page_cnt - 1;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ } else {
+ p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+ p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ }
}
switch (p_chain->intended_use) {
@@ -377,168 +475,184 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
* @param intended_use
* @param mode
*/
-static inline void qed_chain_init(struct qed_chain *p_chain,
- void *p_virt_addr,
- dma_addr_t p_phys_addr,
- u16 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode)
+static inline void qed_chain_init_params(struct qed_chain *p_chain,
+ u32 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type)
{
/* chain fixed parameters */
- p_chain->p_virt_addr = p_virt_addr;
- p_chain->p_phys_addr = p_phys_addr;
+ p_chain->p_virt_addr = NULL;
+ p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->page_cnt = page_cnt;
+ p_chain->intended_use = intended_use;
p_chain->mode = mode;
+ p_chain->cnt_type = cnt_type;
- p_chain->intended_use = intended_use;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- p_chain->usable_per_page =
- USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->capacity = p_chain->usable_per_page * page_cnt;
- p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
-
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
-
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
- if (mode == QED_CHAIN_MODE_NEXT_PTR) {
- struct qed_chain_next *p_next;
- u16 i;
-
- for (i = 0; i < page_cnt - 1; i++) {
- /* Increment mem_phy to the next page. */
- p_phys_addr += QED_CHAIN_PAGE_SIZE;
-
- /* Initialize the physical address of the next page. */
- p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
- elem_size *
- p_chain->
- usable_per_page);
-
- p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
- p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
-
- /* Initialize the virtual address of the next page. */
- p_next->next_virt = (void *)((u8 *)p_virt_addr +
- QED_CHAIN_PAGE_SIZE);
-
- /* Move to the next page. */
- p_virt_addr = p_next->next_virt;
- }
-
- /* Last page's next should point to beginning of the chain */
- p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
- elem_size *
- p_chain->usable_per_page);
+ p_chain->page_cnt = page_cnt;
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
- p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
- p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
- p_next->next_virt = p_chain->p_virt_addr;
- }
- qed_chain_reset(p_chain);
+ p_chain->pbl.p_phys_table = 0;
+ p_chain->pbl.p_virt_table = NULL;
+ p_chain->pbl.pp_virt_addr_tbl = NULL;
}
/**
- * @brief qed_chain_pbl_init - Initalizes a basic pbl chain
- * struct
+ * @brief qed_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
- * @param page_cnt number of pages in the allocated buffer
- * @param elem_size size of each element in the chain
- * @param use_mode
- * @param p_phys_pbl pointer to a pre-allocated side table
- * which will hold physical page addresses.
- * @param p_virt_pbl pointer to a pre allocated side table
- * which will hold virtual page addresses.
+ *
*/
-static inline void
-qed_chain_pbl_init(struct qed_chain *p_chain,
- void *p_virt_addr,
- dma_addr_t p_phys_addr,
- u16 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode use_mode,
- dma_addr_t p_phys_pbl,
- dma_addr_t *p_virt_pbl)
+static inline void qed_chain_init_mem(struct qed_chain *p_chain,
+ void *p_virt_addr, dma_addr_t p_phys_addr)
{
- dma_addr_t *p_pbl_dma = p_virt_pbl;
- int i;
-
- qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
- elem_size, use_mode, QED_CHAIN_MODE_PBL);
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+}
+/**
+ * @brief qed_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl pointer to a pre allocated side table which will hold
+ * virtual page addresses.
+ * @param p_phys_pbl pointer to a pre-allocated side table which will hold
+ * physical page addresses.
+ * @param pp_virt_addr_tbl
+ * pointer to a pre-allocated side table which will hold
+ * the virtual addresses of the chain pages.
+ *
+ */
+static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+ void **pp_virt_addr_tbl)
+{
p_chain->pbl.p_phys_table = p_phys_pbl;
p_chain->pbl.p_virt_table = p_virt_pbl;
-
- /* Fill the PBL with physical addresses*/
- for (i = 0; i < page_cnt; i++) {
- *p_pbl_dma = p_phys_addr;
- p_phys_addr += QED_CHAIN_PAGE_SIZE;
- p_pbl_dma++;
- }
+ p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
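
With the split initialization API, a PBL chain would plausibly be brought up
in stages — fixed parameters first, then the buffer and PBL side tables, then
a reset. A hedged sketch (allocations elided; p_virt/p_phys and the PBL table
names are illustrative):

	struct qed_chain chain;

	qed_chain_init_params(&chain, page_cnt, elem_size,
			      QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			      QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);
	qed_chain_init_mem(&chain, p_virt, p_phys);
	qed_chain_init_pbl_mem(&chain, p_pbl_virt, p_pbl_phys, pp_virt_tbl);
	qed_chain_reset(&chain);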
/**
- * @brief qed_chain_set_prod - sets the prod to the given
- * value
+ * @brief qed_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr virtual address of a chain page of which the next
+ * pointer element is initialized
+ * @param p_virt_next virtual address of the next chain page
+ * @param p_phys_next physical address of the next chain page
*
- * @param prod_idx
- * @param p_prod_elem
*/
-static inline void qed_chain_set_prod(struct qed_chain *p_chain,
- u16 prod_idx,
- void *p_prod_elem)
+static inline void
+qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
+ void *p_virt_curr,
+ void *p_virt_next, dma_addr_t p_phys_next)
{
- p_chain->prod_idx = prod_idx;
- p_chain->p_prod_elem = p_prod_elem;
+ struct qed_chain_next *p_next;
+ u32 size;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
+
+ DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+ p_next->next_virt = p_virt_next;
}
/**
- * @brief qed_chain_get_elem -
+ * @brief qed_chain_get_last_elem -
*
- * get a pointer to an element represented by absolute idx
+ * Returns a pointer to the last element of the chain
*
* @param p_chain
- * @assumption p_chain->size is a power of 2
*
- * @return void*, a pointer to next element
+ * @return void*
*/
-static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
- u16 idx)
+static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if (idx >= p_chain->size)
- return NULL;
+ struct qed_chain_next *p_next = NULL;
+ void *p_virt_addr = NULL;
+ u32 size, last_page_idx;
- ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+ if (!p_chain->p_virt_addr)
+ goto out;
- return ret;
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_virt_addr = p_chain->p_virt_addr;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
+ while (p_next->next_virt != p_chain->p_virt_addr) {
+ p_virt_addr = p_next->next_virt;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ size);
+ }
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ p_virt_addr = p_chain->p_virt_addr;
+ break;
+ case QED_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+ p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+ size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+ p_virt_addr = (u8 *)p_virt_addr + size;
+out:
+ return p_virt_addr;
}
/**
- * @brief qed_chain_sge_inc_cons_prod
+ * @brief qed_chain_set_prod - sets the prod to the given value
*
- * for sge chains, producer isn't increased serially, the ring
- * is expected to be full at all times. Once elements are
- * consumed, they are immediately produced.
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx = (u16)prod_idx;
+ else
+ p_chain->u.chain32.prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
- * @param cnt
- *
- * @return inline void
*/
-static inline void
-qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
- u16 cnt)
+static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
- p_chain->prod_idx += cnt;
- p_chain->cons_idx += cnt;
+ u32 i, page_cnt;
+
+ if (p_chain->mode != QED_CHAIN_MODE_PBL)
+ return;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+ memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+ QED_CHAIN_PAGE_SIZE);
}
#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 6c876a635..4475a9d8a 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -114,6 +114,7 @@ struct qed_queue_start_common_params {
u8 vport_id;
u16 sb;
u16 sb_idx;
+ u16 vf_qid;
};
struct qed_tunn_params {
@@ -128,11 +129,73 @@ struct qed_eth_cb_ops {
void (*force_mac) (void *dev, u8 *mac);
};
+#ifdef CONFIG_DCB
+/* Prototype declaration of qed_eth_dcbnl_ops should match the declaration
+ * of the dcbnl_rtnl_ops structure.
+ */
+struct qed_eth_dcbnl_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app);
+ int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app);
+
+ /* CEE std */
+ u8 (*getstate)(struct qed_dev *cdev);
+ u8 (*setstate)(struct qed_dev *cdev, u8 state);
+ void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map);
+ void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+ void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map);
+ void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+ void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting);
+ void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting);
+ u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap);
+ int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num);
+ u8 (*getpfcstate)(struct qed_dev *cdev);
+ int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id);
+ u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct qed_dev *cdev);
+ void (*setpgtccfgtx)(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+ void (*setpgtccfgrx)(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+ void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+ void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+ u8 (*setall)(struct qed_dev *cdev);
+ int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num);
+ void (*setpfcstate)(struct qed_dev *cdev, u8 state);
+ int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up);
+ u8 (*setdcbx)(struct qed_dev *cdev, u8 state);
+ u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags);
+
+ /* Peer apps */
+ int (*peer_getappinfo)(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count);
+ int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
+
+ /* CEE peer */
+ int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc);
+ int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
+};
+#endif
+
struct qed_eth_ops {
const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
const struct qed_iov_hv_ops *iov;
#endif
+#ifdef CONFIG_DCB
+ const struct qed_eth_dcbnl_ops *dcb;
+#endif
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4c29439f5..d6c4177df 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,104 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#ifdef CONFIG_DCB
+#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
+#define QED_LLDP_PORT_ID_STAT_LEN 4
+#define QED_DCBX_MAX_APP_PROTOCOL 32
+#define QED_MAX_PFC_PRIORITIES 8
+#define QED_DCBX_DSCP_SIZE 64
+
+struct qed_dcbx_lldp_remote {
+ u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
+};
+
+struct qed_dcbx_lldp_local {
+ u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+};
+
+struct qed_dcbx_app_prio {
+ u8 roce;
+ u8 roce_v2;
+ u8 fcoe;
+ u8 iscsi;
+ u8 eth;
+};
+
+struct qed_dbcx_pfc_params {
+ bool willing;
+ bool enabled;
+ u8 prio[QED_MAX_PFC_PRIORITIES];
+ u8 max_tc;
+};
+
+enum qed_dcbx_sf_ieee_type {
+ QED_DCBX_SF_IEEE_ETHTYPE,
+ QED_DCBX_SF_IEEE_TCP_PORT,
+ QED_DCBX_SF_IEEE_UDP_PORT,
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
+struct qed_app_entry {
+ bool ethtype;
+ enum qed_dcbx_sf_ieee_type sf_ieee;
+ bool enabled;
+ u8 prio;
+ u16 proto_id;
+ enum dcbx_protocol_type proto_type;
+};
+
+struct qed_dcbx_params {
+ struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool app_error;
+ bool ets_willing;
+ bool ets_enabled;
+ bool ets_cbs;
+ bool valid;
+ u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
+ u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
+ u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
+ struct qed_dbcx_pfc_params pfc;
+ u8 max_ets_tc;
+};
+
+struct qed_dcbx_admin_params {
+ struct qed_dcbx_params params;
+ bool valid;
+};
+
+struct qed_dcbx_remote_params {
+ struct qed_dcbx_params params;
+ bool valid;
+};
+
+struct qed_dcbx_operational_params {
+ struct qed_dcbx_app_prio app_prio;
+ struct qed_dcbx_params params;
+ bool valid;
+ bool enabled;
+ bool ieee;
+ bool cee;
+ u32 err;
+};
+
+struct qed_dcbx_get {
+ struct qed_dcbx_operational_params operational;
+ struct qed_dcbx_lldp_remote lldp_remote;
+ struct qed_dcbx_lldp_local lldp_local;
+ struct qed_dcbx_remote_params remote;
+ struct qed_dcbx_admin_params local;
+};
+#endif
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -58,8 +156,70 @@ struct qed_eth_pf_params {
u16 num_cons;
};
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct qed_iscsi_pf_params {
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+ u32 max_cwnd;
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u16 dup_ack_threshold;
+ u16 tx_sws_timer;
+ u16 min_rto;
+ u16 min_rto_rt;
+ u16 max_rto;
+
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 half_way_close_timeout;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 cmdq_xoff_threshold;
+ u16 cmdq_xon_threshold;
+ u16 rq_buffer_size;
+
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 log_page_size;
+ u8 rqe_log_size;
+ u8 max_fin_rt;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u8 ooo_enable;
+
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
+struct qed_rdma_pf_params {
+ /* Supplied to QED during resource allocation (may affect the ILT and
+ * the doorbell BAR).
+ */
+ u32 min_dpis; /* number of requested DPIs */
+ u32 num_mrs; /* number of requested memory regions */
+ u32 num_qps; /* number of requested Queue Pairs */
+ u32 num_srqs; /* number of requested SRQs */
+ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
+ u8 gl_pi; /* protocol index */
+
+ /* Will allocate rate limiters to be used with QPs */
+ u8 enable_dcqcn;
+};
+
struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
+ struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_rdma_pf_params rdma_pf_params;
};
enum qed_int_mode {
@@ -100,6 +260,8 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
+ bool rdma_supported;
+
u32 flash_size;
u8 mf_mode;
bool tx_switching;
@@ -111,6 +273,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
+ QED_PROTOCOL_ISCSI,
};
struct qed_link_params {
@@ -325,7 +488,8 @@ struct qed_common_ops {
int (*chain_alloc)(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
- u16 num_elems,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems,
size_t elem_size,
struct qed_chain *p_chain);
@@ -333,6 +497,30 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
+ * @brief get_coalesce - Get coalesce parameters in usec
+ *
+ * @param cdev
+ * @param rx_coal - Rx coalesce value in usec
+ * @param tx_coal - Tx coalesce value in usec
+ *
+ */
+ void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
+
+/**
+ * @brief set_coalesce - Configure Rx coalesce value in usec
+ *
+ * @param cdev
+ * @param rx_coal - Rx coalesce value in usec
+ * @param tx_coal - Tx coalesce value in usec
+ * @param qid - Queue index
+ * @param sb_id - Status Block Id
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+ u8 qid, u16 sb_id);
+
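
A protocol driver holding the exported ops could plausibly pair the two
coalesce callbacks like so (edev/cdev and the queue/status-block ids are
illustrative):

	u16 rx_coal, tx_coal;
	int rc;

	edev->ops->common->get_coalesce(cdev, &rx_coal, &tx_coal);

	/* double Rx coalescing on queue 0, status block 0 */
	rc = edev->ops->common->set_coalesce(cdev, rx_coal * 2, tx_coal, 0, 0);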
+/**
* @brief set_led - Configure LED mode
*
* @param cdev
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
new file mode 100644
index 000000000..187991c1f
--- /dev/null
+++ b/include/linux/qed/rdma_common.h
@@ -0,0 +1,44 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __RDMA_COMMON__
+#define __RDMA_COMMON__
+/************************/
+/* RDMA FW CONSTANTS */
+/************************/
+
+#define RDMA_RESERVED_LKEY (0)
+#define RDMA_RING_PAGE_SIZE (0x1000)
+
+#define RDMA_MAX_SGE_PER_SQ_WQE (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE (4)
+
+#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF)
+
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
+
+#define RDMA_MAX_CQS (64 * 1024)
+#define RDMA_MAX_TIDS (128 * 1024 - 1)
+#define RDMA_MAX_PDS (64 * 1024)
+
+#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
+
+struct rdma_srq_id {
+ __le16 srq_idx;
+ __le16 opaque_fid;
+};
+
+struct rdma_srq_producers {
+ __le32 sge_prod;
+ __le32 wqe_prod;
+};
+
+#endif /* __RDMA_COMMON__ */
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
new file mode 100644
index 000000000..2eeaf3dc6
--- /dev/null
+++ b/include/linux/qed/roce_common.h
@@ -0,0 +1,17 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ROCE_COMMON__
+#define __ROCE_COMMON__
+
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+
+#define ROCE_MAX_QPS (32 * 1024)
+
+#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
new file mode 100644
index 000000000..3b8e1efd9
--- /dev/null
+++ b/include/linux/qed/storage_common.h
@@ -0,0 +1,91 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __STORAGE_COMMON__
+#define __STORAGE_COMMON__
+
+#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ (0)
+#define BDQ_ID_IMM_DATA (1)
+#define BDQ_NUM_IDS (2)
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+
+struct scsi_bd {
+ struct regpair address;
+ struct regpair opaque;
+};
+
+struct scsi_bdq_ram_drv_data {
+ __le16 external_producer;
+ __le16 reserved0[3];
+};
+
+struct scsi_drv_cmdq {
+ __le16 cmdq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct scsi_init_func_params {
+ __le16 num_tasks;
+ u8 log_page_size;
+ u8 debug_mode;
+ u8 reserved2[12];
+};
+
+struct scsi_init_func_queues {
+ struct regpair glbl_q_params_addr;
+ __le16 rq_buffer_size;
+ __le16 cq_num_entries;
+ __le16 cmdq_num_entries;
+ u8 bdq_resource_id;
+ u8 q_validity;
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+ u8 num_queues;
+ u8 queue_relative_offset;
+ u8 cq_sb_pi;
+ u8 cmdq_sb_pi;
+ __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
+ __le16 reserved0;
+ u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+ struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
+ __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
+ __le16 bdq_xon_threshold[BDQ_NUM_IDS];
+ __le16 cmdq_xoff_threshold;
+ __le16 cmdq_xon_threshold;
+ __le32 reserved1;
+};
+
+struct scsi_ram_per_bdq_resource_drv_data {
+ struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
+};
+
+struct scsi_sge {
+ struct regpair sge_addr;
+ __le16 sge_len;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct scsi_terminate_extra_params {
+ __le16 unsolicited_cq_count;
+ __le16 cmdq_count;
+ u8 reserved[4];
+};
+
+#endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
new file mode 100644
index 000000000..accba0e6b
--- /dev/null
+++ b/include/linux/qed/tcp_common.h
@@ -0,0 +1,226 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __TCP_COMMON__
+#define __TCP_COMMON__
+
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+enum tcp_connect_mode {
+ TCP_CONNECT_ACTIVE,
+ TCP_CONNECT_PASSIVE,
+ MAX_TCP_CONNECT_MODE
+};
+
+struct tcp_init_params {
+ __le32 max_cwnd;
+ __le16 dup_ack_threshold;
+ __le16 tx_sws_timer;
+ __le16 min_rto;
+ __le16 min_rto_rt;
+ __le16 max_rto;
+ u8 maxfinrt;
+ u8 reserved[1];
+};
+
+enum tcp_ip_version {
+ TCP_IPV4,
+ TCP_IPV6,
+ MAX_TCP_IP_VERSION
+};
+
+struct tcp_offload_params {
+ __le16 local_mac_addr_lo;
+ __le16 local_mac_addr_mid;
+ __le16 local_mac_addr_hi;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 vlan_id;
+ u8 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
+ u8 ip_version;
+ __le32 remote_ip[4];
+ __le32 local_ip[4];
+ __le32 flow_label;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le16 remote_port;
+ __le16 local_port;
+ __le16 mss;
+ u8 rcv_wnd_scale;
+ u8 connect_mode;
+ __le16 srtt;
+ __le32 cwnd;
+ __le32 ss_thresh;
+ __le16 reserved1;
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ __le32 rcv_next;
+ __le32 snd_una;
+ __le32 snd_next;
+ __le32 snd_max;
+ __le32 snd_wnd;
+ __le32 rcv_wnd;
+ __le32 snd_wl1;
+ __le32 ts_time;
+ __le32 ts_recent;
+ __le32 ts_recent_age;
+ __le32 total_rt;
+ __le32 ka_timeout_delta;
+ __le32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ __le16 rtt_var;
+ __le16 reserved2;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 initial_rcv_wnd;
+ u8 snd_wnd_scale;
+ u8 ack_frequency;
+ __le16 da_timeout_value;
+ __le32 ts_ticks_per_second;
+};
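
The MASK/SHIFT pairs follow the usual HSI convention: a flag is programmed by
shifting into place and read back by masking. An illustrative fragment:

	/* enable timestamps and keep-alive on the offloaded connection */
	params->flags |= (1 << TCP_OFFLOAD_PARAMS_TS_EN_SHIFT) |
			 (1 << TCP_OFFLOAD_PARAMS_KA_EN_SHIFT);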
+
+struct tcp_offload_params_opt2 {
+ __le16 local_mac_addr_lo;
+ __le16 local_mac_addr_mid;
+ __le16 local_mac_addr_hi;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 vlan_id;
+ u8 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+ u8 ip_version;
+ __le32 remote_ip[4];
+ __le32 local_ip[4];
+ __le32 flow_label;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le16 remote_port;
+ __le16 local_port;
+ __le16 mss;
+ u8 rcv_wnd_scale;
+ u8 connect_mode;
+ __le16 syn_ip_payload_length;
+ __le32 syn_phy_addr_lo;
+ __le32 syn_phy_addr_hi;
+ __le32 reserved1[22];
+};
+
+enum tcp_seg_placement_event {
+ TCP_EVENT_ADD_PEN,
+ TCP_EVENT_ADD_NEW_ISLE,
+ TCP_EVENT_ADD_ISLE_RIGHT,
+ TCP_EVENT_ADD_ISLE_LEFT,
+ TCP_EVENT_JOIN,
+ TCP_EVENT_NOP,
+ MAX_TCP_SEG_PLACEMENT_EVENT
+};
+
+struct tcp_update_params {
+ __le16 flags;
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 mss;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 flow_label;
+ __le32 initial_rcv_wnd;
+ u8 ka_max_probe_cnt;
+ u8 reserved1[7];
+};
+
+struct tcp_upload_params {
+ __le32 rcv_next;
+ __le32 snd_una;
+ __le32 snd_next;
+ __le32 snd_max;
+ __le32 snd_wnd;
+ __le32 rcv_wnd;
+ __le32 snd_wl1;
+ __le32 cwnd;
+ __le32 ss_thresh;
+ __le16 srtt;
+ __le16 rtt_var;
+ __le32 ts_time;
+ __le32 ts_recent;
+ __le32 ts_recent_age;
+ __le32 total_rt;
+ __le32 ka_timeout_delta;
+ __le32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ __le32 reserved;
+};
+
+#endif /* __TCP_COMMON__ */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 9dfb6bce8..55107a8ff 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -179,6 +179,16 @@ static inline struct kqid make_kqid_projid(kprojid_t projid)
return kqid;
}
+/**
+ * qid_has_mapping - Report if a qid maps into a user namespace.
+ * @ns: The user namespace to see if a value maps into.
+ * @qid: The kernel internal quota identifier to test.
+ */
+static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid)
+{
+ return from_kqid(ns, qid) != (qid_t) -1;
+}
+
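
For example, quota-reporting code could plausibly use this helper to reject
identifiers that are invisible in the caller's namespace:

	if (!qid_has_mapping(current_user_ns(), qid))
		return -EOVERFLOW;	/* qid not representable in this ns */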
extern spinlock_t dq_data_lock;
@@ -200,8 +210,8 @@ struct mem_dqblk {
qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
qsize_t dqb_isoftlimit; /* preferred inode limit */
qsize_t dqb_curinodes; /* current # allocated inodes */
- time_t dqb_btime; /* time limit for excessive disk use */
- time_t dqb_itime; /* time limit for excessive inode use */
+ time64_t dqb_btime; /* time limit for excessive disk use */
+ time64_t dqb_itime; /* time limit for excessive inode use */
};
/*
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index eca6f626c..4c45105de 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -35,7 +35,7 @@
* 00 - data pointer
* 01 - internal entry
* 10 - exceptional entry
- * 11 - locked exceptional entry
+ * 11 - this bit combination is currently unused/reserved
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
@@ -291,6 +291,7 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
diff --git a/include/linux/random.h b/include/linux/random.h
index e47e53374..3d6e9815c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
#else
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
{
return 0;
}
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
{
return 0;
}
-static inline int arch_has_random(void)
+static inline bool arch_has_random(void)
{
return 0;
}
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
{
return 0;
}
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
{
return 0;
}
-static inline int arch_has_random_seed(void)
+static inline bool arch_has_random_seed(void)
{
return 0;
}
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 181025292..57c9e0622 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,11 +2,15 @@
#define _LINUX_RATELIMIT_H
#include <linux/param.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define DEFAULT_RATELIMIT_BURST 10
+/* report the number of suppressed messages on release */
+#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
@@ -15,6 +19,7 @@ struct ratelimit_state {
int printed;
int missed;
unsigned long begin;
+ unsigned long flags;
};
#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
@@ -34,12 +39,35 @@ struct ratelimit_state {
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
+ memset(rs, 0, sizeof(*rs));
+
raw_spin_lock_init(&rs->lock);
- rs->interval = interval;
- rs->burst = burst;
- rs->printed = 0;
- rs->missed = 0;
- rs->begin = 0;
+ rs->interval = interval;
+ rs->burst = burst;
+}
+
+static inline void ratelimit_default_init(struct ratelimit_state *rs)
+{
+ return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+}
+
+static inline void ratelimit_state_exit(struct ratelimit_state *rs)
+{
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
+ return;
+
+ if (rs->missed) {
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
+ current->comm, rs->missed);
+ rs->missed = 0;
+ }
+}
+
+static inline void
+ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
+{
+ rs->flags = flags;
}
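
A hedged usage sketch of the new flag — a caller opting in to a summary of
suppressed lines when the state is released:

	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);

	if (__ratelimit(&rs))
		pr_info("frequent event\n");

	/* on teardown, print how many lines were suppressed */
	ratelimit_state_exit(&rs);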
extern struct ratelimit_state printk_ratelimit_state;
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index b6900099e..e58501849 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -76,6 +76,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
+extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 14d7b831b..d076183e4 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -130,6 +130,19 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
WRITE_ONCE(root->rb_node, new);
}
+static inline void
+__rb_change_child_rcu(struct rb_node *old, struct rb_node *new,
+ struct rb_node *parent, struct rb_root *root)
+{
+ if (parent) {
+ if (parent->rb_left == old)
+ rcu_assign_pointer(parent->rb_left, new);
+ else
+ rcu_assign_pointer(parent->rb_right, new);
+ } else
+ rcu_assign_pointer(root->rb_node, new);
+}
+
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5f1533e3d..1aa62e1a7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -45,6 +45,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
+#include <linux/irqflags.h>
#include <asm/barrier.h>
@@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void)
* in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
- * will tell RCU that it needs to pay attending, invoke its argument
- * (in this example, a call to the do_something_with_RCU() function),
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, calling the do_something_with_RCU() function),
* and then tell RCU to go back to ignoring this CPU. It is permissible
- * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
- * quite limited. If deeper nesting is required, it will be necessary
- * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
+ * on the order of a million or so, even on 32-bit systems). It is
+ * not legal to block within RCU_NONIDLE(), nor is it permissible to
+ * transfer control either into or out of RCU_NONIDLE()'s statement.
*/
#define RCU_NONIDLE(a) \
do { \
@@ -611,6 +613,12 @@ static inline void rcu_preempt_sleep_check(void)
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
+#define rcu_dereference_raw(p) \
+({ \
+ /* Dependency order vs. p above. */ \
+ typeof(p) ________p1 = lockless_dereference(p); \
+ ((typeof(*p) __force __kernel *)(________p1)); \
+})
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -649,7 +657,16 @@ static inline void rcu_preempt_sleep_check(void)
* please be careful when making changes to rcu_assign_pointer() and the
* other macros that it invokes.
*/
-#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
+#define rcu_assign_pointer(p, v) \
+({ \
+ uintptr_t _r_a_p__v = (uintptr_t)(v); \
+ \
+ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
+ else \
+ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+ _r_a_p__v; \
+})
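
Callers are unchanged by the rework; only a compile-time NULL now skips the
release barrier. A minimal sketch (gp is a hypothetical RCU-protected global;
allocation checks omitted):

	struct foo __rcu *gp;
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	p->a = 1;			/* initialize before publishing */
	rcu_assign_pointer(gp, p);	/* release store orders the init */

	rcu_assign_pointer(gp, NULL);	/* constant NULL: plain WRITE_ONCE() */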
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
@@ -729,8 +746,6 @@ static inline void rcu_preempt_sleep_check(void)
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
-#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
-
/*
* The tracing infrastructure traces RCU (we want that), but unfortunately
* some of the RCU checks causes tracing to lock up the system.
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 93aea7502..ac81e4063 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -243,4 +243,11 @@ static inline void rcu_all_qs(void)
barrier(); /* Avoid RCU read-side critical sections leaking across. */
}
+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu NULL
+#define rcutree_online_cpu NULL
+#define rcutree_offline_cpu NULL
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5043cb823..63a4e4cf4 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -111,4 +111,11 @@ bool rcu_is_watching(void);
void rcu_all_qs(void);
+/* RCUtree hotplug events */
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 3dc08ce15..2c12cc5af 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -95,6 +95,45 @@ struct reg_sequence {
#define regmap_fields_force_update_bits(field, id, mask, val) \
regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+/**
+ * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 means
+ * tight-loop). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a read error. In the first two cases,
+ * the last read value at @addr is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ int ret; \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ ret = regmap_read((map), (addr), &(val)); \
+ if (ret) \
+ break; \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
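
As with the readx_poll_timeout() helpers it is modelled on, a typical call
polls a status register until a ready bit is set (STATUS_REG and READY_BIT
are hypothetical):

	unsigned int val;
	int ret;

	/* sleep ~100us between reads, give up after 100ms */
	ret = regmap_read_poll_timeout(map, STATUS_REG, val,
				       val & READY_BIT, 100, 100000);
	if (ret)
		dev_err(dev, "device not ready: %d\n", ret);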
+
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -851,6 +890,12 @@ struct regmap_irq {
* @num_type_reg: Number of type registers.
* @type_reg_stride: Stride to use for chips where type registers are not
* contiguous.
+ * @handle_pre_irq: Driver specific callback to handle interrupt from device
+ * before regmap_irq_handler() processes the interrupts.
+ * @handle_post_irq: Driver specific callback to handle interrupt from device
+ * after handling the interrupts in regmap_irq_handler().
+ * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
+ * driver specific pre/post interrupt handler is called.
*/
struct regmap_irq_chip {
const char *name;
@@ -877,6 +922,10 @@ struct regmap_irq_chip {
int num_type_reg;
unsigned int type_reg_stride;
+
+ int (*handle_pre_irq)(void *irq_drv_data);
+ int (*handle_post_irq)(void *irq_drv_data);
+ void *irq_drv_data;
};
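
A driver needing such bracketing might wire the new callbacks like this (the
foo_* names and handler bodies are illustrative; the usual status/mask fields
are elided):

static int foo_handle_pre_irq(void *irq_drv_data)
{
	/* e.g. wake the device so its status registers can be read */
	return 0;
}

static int foo_handle_post_irq(void *irq_drv_data)
{
	/* e.g. let the device sleep again */
	return 0;
}

static struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.handle_pre_irq	= foo_handle_pre_irq,
	.handle_post_irq = foo_handle_post_irq,
	/* .status_base, .mask_base, .irqs, .num_irqs, ... */
};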
struct regmap_irq_chip_data;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 48603506f..cae500b2c 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -224,7 +224,6 @@ int regulator_bulk_force_disable(int num_consumers,
void regulator_bulk_free(int num_consumers,
struct regulator_bulk_data *consumers);
-int regulator_can_change_voltage(struct regulator *regulator);
int regulator_count_voltages(struct regulator *regulator);
int regulator_list_voltage(struct regulator *regulator, unsigned selector);
int regulator_is_supported_voltage(struct regulator *regulator,
@@ -436,11 +435,6 @@ static inline void regulator_bulk_free(int num_consumers,
{
}
-static inline int regulator_can_change_voltage(struct regulator *regulator)
-{
- return 0;
-}
-
static inline int regulator_set_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index a43a5ca11..80cb40b7c 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,5 +1,6 @@
/*
- * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
+ * da9211.h - Regulator device driver for DA9211/DA9212/DA9213/DA9214/DA9215
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -22,7 +23,9 @@
enum da9211_chip_id {
DA9211,
+ DA9212,
DA9213,
+ DA9214,
DA9215,
};
diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h
new file mode 100644
index 000000000..67011cd1c
--- /dev/null
+++ b/include/linux/regulator/mt6323-regulator.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6323_H
+#define __LINUX_REGULATOR_MT6323_H
+
+enum {
+ MT6323_ID_VPROC = 0,
+ MT6323_ID_VSYS,
+ MT6323_ID_VPA,
+ MT6323_ID_VTCXO,
+ MT6323_ID_VCN28,
+ MT6323_ID_VCN33_BT,
+ MT6323_ID_VCN33_WIFI,
+ MT6323_ID_VA,
+ MT6323_ID_VCAMA,
+ MT6323_ID_VIO28 = 9,
+ MT6323_ID_VUSB,
+ MT6323_ID_VMC,
+ MT6323_ID_VMCH,
+ MT6323_ID_VEMC3V3,
+ MT6323_ID_VGP1,
+ MT6323_ID_VGP2,
+ MT6323_ID_VGP3,
+ MT6323_ID_VCN18,
+ MT6323_ID_VSIM1,
+ MT6323_ID_VSIM2,
+ MT6323_ID_VRTC,
+ MT6323_ID_VCAMAF,
+ MT6323_ID_VIBR,
+ MT6323_ID_VRF18,
+ MT6323_ID_VM,
+ MT6323_ID_VIO18,
+ MT6323_ID_VCAMD,
+ MT6323_ID_VCAMIO,
+ MT6323_ID_RG_MAX,
+};
+
+#define MT6323_MAX_REGULATOR MT6323_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6323_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index b91ba932b..db1fe6772 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -53,4 +53,8 @@ struct reset_controller_dev {
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
+struct device;
+int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev);
+
#endif
diff --git a/include/linux/rio.h b/include/linux/rio.h
index aa2323893..37b95c4af 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -163,6 +163,7 @@ enum rio_device_state {
* @dst_ops: Destination operation capabilities
* @comp_tag: RIO component tag
* @phys_efptr: RIO device extended features pointer
+ * @phys_rmap: LP-Serial Register Map Type (1 or 2)
* @em_efptr: RIO Error Management features pointer
* @dma_mask: Mask of bits of RIO address this device implements
* @driver: Driver claiming this device
@@ -193,6 +194,7 @@ struct rio_dev {
u32 dst_ops;
u32 comp_tag;
u32 phys_efptr;
+ u32 phys_rmap;
u32 em_efptr;
u64 dma_mask;
struct rio_driver *driver; /* RIO driver claiming this device */
@@ -237,11 +239,6 @@ struct rio_dbell {
void *dev_id;
};
-enum rio_phy_type {
- RIO_PHY_PARALLEL,
- RIO_PHY_SERIAL,
-};
-
/**
* struct rio_mport - RIO master port info
* @dbells: List of doorbell events
@@ -259,8 +256,8 @@ enum rio_phy_type {
* @id: Port ID, unique among all ports
* @index: Port index, unique among all port interfaces of the same type
* @sys_size: RapidIO common transport system size
- * @phy_type: RapidIO phy type
* @phys_efptr: RIO port extended features pointer
+ * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2).
* @name: Port name string
* @dev: device structure associated with an mport
* @priv: Master port private data
@@ -289,8 +286,8 @@ struct rio_mport {
* 0 - Small size. 256 devices.
* 1 - Large size, 65536 devices.
*/
- enum rio_phy_type phy_type; /* RapidIO phy type */
u32 phys_efptr;
+ u32 phys_rmap;
unsigned char name[RIO_MAX_MPORT_NAME];
struct device dev;
void *priv; /* Master port private data */
@@ -425,7 +422,7 @@ struct rio_ops {
int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
void *(*get_inb_message)(struct rio_mport *mport, int mbox);
int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
- u64 rstart, u32 size, u32 flags);
+ u64 rstart, u64 size, u32 flags);
void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
int (*query_mport)(struct rio_mport *mport,
struct rio_mport_attr *attr);
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 2543bc163..334c576c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -38,5 +38,7 @@
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
#define RIO_DID_TSI721 0x80ab
+#define RIO_DID_IDTRXS1632 0x80e5
+#define RIO_DID_IDTRXS2448 0x80e6
#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 1063ae382..40c04efe7 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -42,9 +42,11 @@
#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */
#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */
#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */
+#define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common Transport Dev32 */
#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
-#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
+#define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */
+#define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */
#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */
#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */
#define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */
@@ -194,70 +196,101 @@
#define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK)
/* Extended Feature Block IDs */
-#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */
-#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */
-#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */
-#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */
-#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */
-#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */
-#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */
+#define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */
+#define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */
+#define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free Devices, Map I */
+#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */
+#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */
+#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */
#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */
+#define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */
+#define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */
+#define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */
+#define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */
+#define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */
+#define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */
+#define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */
/*
- * Physical 8/16 LP-LVDS
- * ID=0x0001, Generic End Point Devices
- * ID=0x0002, Generic End Point Devices, software assisted recovery option
- * ID=0x0003, Generic End Point Free Devices
- *
- * Physical LP-Serial
- * ID=0x0004, Generic End Point Devices
- * ID=0x0005, Generic End Point Devices, software assisted recovery option
- * ID=0x0006, Generic End Point Free Devices
+ * Physical LP-Serial Registers Definitions
+ * Parameters in register macros:
+ * n - port number, m - Register Map Type (1 or 2)
*/
#define RIO_PORT_MNT_HEADER 0x0000
#define RIO_PORT_REQ_CTL_CSR 0x0020
-#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */
-#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */
-#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */
+#define RIO_PORT_RSP_CTL_CSR 0x0024
+#define RIO_PORT_LINKTO_CTL_CSR 0x0020
+#define RIO_PORT_RSPTO_CTL_CSR 0x0024
#define RIO_PORT_GEN_CTL_CSR 0x003c
#define RIO_PORT_GEN_HOST 0x80000000
#define RIO_PORT_GEN_MASTER 0x40000000
#define RIO_PORT_GEN_DISCOVERED 0x20000000
-#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m)))
#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */
#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */
-#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m)))
#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */
#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
-#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */
#define RIO_PORT_N_ACK_CLEAR 0x80000000
#define RIO_PORT_N_ACK_INBOUND 0x3f000000
#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
-#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20)
+#define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m)))
#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000
-#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
-#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
-#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
+#define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m)))
+#define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */
+#define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */
#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */
+#define RIO_PORT_N_ERR_STS_PORT_UA 0x00000008 /* Port Unavailable */
#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
-#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
+#define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m)))
#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */
#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
-#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
-#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000
-#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000
-#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000
+#define RIO_PORT_N_CTL_EN_RX 0x00200000
+#define RIO_PORT_N_CTL_EN_TX 0x00400000
+#define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */
+#define RIO_PORT_N_OB_ACK_CLEAR 0x80000000
+#define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000
+#define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff
+#define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */
+#define RIO_PORT_N_IB_ACK_INBND 0x00000fff
+
+/*
+ * Device-based helper macros for serial port register access.
+ * d - pointer to rapidio device object, n - port number
+ */
+
+#define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n))
+
+#define RIO_DEV_PORT_N_CTL2_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_CTL_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n))
+
+#define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n))
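For orientation, a sketch of how a driver might use one of these device-based macros (assumes rdev->phys_efptr and rdev->phys_rmap were filled in during enumeration; rio_wait_port_ok() is a hypothetical helper, not part of this header):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

static int rio_wait_port_ok(struct rio_dev *rdev, int port, int retries)
{
	u32 sts;

	while (retries-- > 0) {
		/* offset resolves through phys_efptr and the RM-I/RM-II stride */
		if (rio_read_config_32(rdev,
				RIO_DEV_PORT_N_ERR_STS_CSR(rdev, port), &sts))
			return -EIO;
		if (sts & RIO_PORT_N_ERR_STS_PORT_OK)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}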
/*
* Error Management Extensions (RapidIO 1.3+, Part 8)
@@ -268,6 +301,7 @@
/* General EM Registers (Common for all Ports) */
#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
+#define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */
#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */
@@ -278,15 +312,33 @@
#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */
+#define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */
+#define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */
#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */
+#define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */
+#define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */
+#define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID16 */
+#define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID32 */
#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */
+#define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet Time-to-live value */
+#define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */
+#define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */
+#define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */
/* Per-Port EM Registers */
#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */
#define REM_PED_IMPL_SPEC 0x80000000
+#define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */
+#define REM_PED_LINK_UPDA 0x20000000 /* Link Uninit Packet Discard Active */
+#define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */
#define REM_PED_LINK_TO 0x00000001
+
#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
+#define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */
+#define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */
+#define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */
+
#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */
#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
@@ -294,5 +346,50 @@
#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */
#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
+#define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */
+#define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */
+
+/*
+ * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3)
+ * Register offsets are defined from beginning of the block.
+ */
+
+/* Broadcast Routing Table Control CSR */
+#define RIO_BC_RT_CTL_CSR 0x020
+#define RIO_RT_CTL_THREE_LVL 0x80000000
+#define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000
+#define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */
+
+/* Broadcast Level 0 Info CSR */
+#define RIO_BC_RT_LVL0_INFO_CSR 0x030
+#define RIO_RT_L0I_NUM_GR 0xff000000
+#define RIO_RT_L0I_GR_PTR 0x00fffc00
+
+/* Broadcast Level 1 Info CSR */
+#define RIO_BC_RT_LVL1_INFO_CSR 0x034
+#define RIO_RT_L1I_NUM_GR 0xff000000
+#define RIO_RT_L1I_GR_PTR 0x00fffc00
+
+/* Broadcast Level 2 Info CSR */
+#define RIO_BC_RT_LVL2_INFO_CSR 0x038
+#define RIO_RT_L2I_NUM_GR 0xff000000
+#define RIO_RT_L2I_GR_PTR 0x00fffc00
+
+/* Per-Port Routing Table registers.
+ * Register fields defined in the broadcast section above are
+ * applicable to the corresponding registers below.
+ */
+#define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x))
+#define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x))
+#define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x))
+#define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x))
+
+/* Register formats for a Routing Table Group entry.
+ * Register offsets are calculated using the GR_PTR field in the
+ * corresponding table Level_N and the group/entry numbers (see RapidIO 3.0+ Part 3).
+ */
+#define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000
+#define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff
+#define RIO_RT_ENTRY_DROP_PKT 0x300
#endif /* LINUX_RIO_REGS_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 2b0fad836..b46bb5620 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -165,7 +165,7 @@ void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, bool);
-void page_add_file_rmap(struct page *);
+void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
diff --git a/include/linux/rtc/ds1286.h b/include/linux/rtc/ds1286.h
new file mode 100644
index 000000000..45ea0aa0a
--- /dev/null
+++ b/include/linux/rtc/ds1286.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 1998, 1999, 2003 Ralf Baechle
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __LINUX_DS1286_H
+#define __LINUX_DS1286_H
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_HUNDREDTH_SECOND 0
+#define RTC_SECONDS 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+#define RTC_DAY 6
+#define RTC_DAY_ALARM 7
+#define RTC_DATE 8
+#define RTC_MONTH 9
+#define RTC_YEAR 10
+#define RTC_CMD 11
+#define RTC_WHSEC 12
+#define RTC_WSEC 13
+#define RTC_UNUSED 14
+
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+
+/*
+ * Bits in the month register
+ */
+#define RTC_EOSC 0x80
+#define RTC_ESQW 0x40
+
+/*
+ * Bits in the Command register
+ */
+#define RTC_TDF 0x01
+#define RTC_WAF 0x02
+#define RTC_TDM 0x04
+#define RTC_WAM 0x08
+#define RTC_PU_LVL 0x10
+#define RTC_IBH_LO 0x20
+#define RTC_IPSW 0x40
+#define RTC_TE 0x80
+
+#endif /* __LINUX_DS1286_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index c006cc900..2daece897 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,8 +89,9 @@ void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
#endif
-extern void rtnetlink_init(void);
-extern void __rtnl_unlock(void);
+void rtnetlink_init(void);
+void __rtnl_unlock(void);
+void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
#define ASSERT_RTNL() do { \
if (unlikely(!rtnl_is_locked())) { \
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d37fbb34d..dd1d14250 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -23,10 +23,11 @@ struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
- long count;
+ atomic_long_t count;
struct list_head wait_list;
raw_spinlock_t wait_lock;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
- return sem->count != 0;
+ return atomic_long_read(&sem->count) != 0;
}
+#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
#endif
/* Common initializer macros and functions */
@@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
#endif
#define __RWSEM_INITIALIZER(name) \
- { .count = RWSEM_UNLOCKED_VALUE, \
+ { __RWSEM_INIT_COUNT(name), \
.wait_list = LIST_HEAD_INIT((name).wait_list), \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
__RWSEM_OPT_INIT(name) \
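Since both configurations now expand through __RWSEM_INIT_COUNT(), static definitions keep working unchanged; a minimal sketch:

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* uses __RWSEM_INITIALIZER() */

static void example_reader(void)
{
	down_read(&example_sem);
	/* ... read-side section; rwsem_is_locked() now reads atomically ... */
	up_read(&example_sem);
}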
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
index a53915cd5..c68307bc3 100644
--- a/include/linux/rxrpc.h
+++ b/include/linux/rxrpc.h
@@ -35,21 +35,24 @@ struct sockaddr_rxrpc {
*/
#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
-#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */
+#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
/*
* RxRPC control messages
+ * - If neither abort nor accept is specified, the message is a data message.
* - terminal messages mean that a user call ID tag can be recycled
+ * - the s/r/- markers indicate whether a message applies to sendmsg() (s) and/or recvmsg() (r)
*/
-#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */
-#define RXRPC_ABORT 2 /* abort request / notification [terminal] */
-#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */
-#define RXRPC_NET_ERROR 5 /* network error received [terminal] */
-#define RXRPC_BUSY 6 /* server busy received [terminal] */
-#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */
-#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */
-#define RXRPC_ACCEPT 9 /* [Server] accept request */
+#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */
+#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */
+#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */
+#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */
+#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */
+#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */
+#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */
+#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */
+#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */
/*
* RxRPC security levels
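A hedged userspace sketch of the new flag (assumes an AF_RXRPC socket set up per Documentation/networking/rxrpc.txt, with msg_control sized to CMSG_SPACE(sizeof(unsigned long)) + CMSG_SPACE(0); SOL_RXRPC comes from linux/socket.h):

#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static void mark_call_exclusive(struct msghdr *msg, unsigned long call_id)
{
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);

	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	cmsg = CMSG_NXTHDR(msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_EXCLUSIVE_CALL;	/* flag only, no payload */
	cmsg->cmsg_len = CMSG_LEN(0);
}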
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0c40d315f..646bf0947 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,9 +220,10 @@ extern void proc_sched_set_task(struct task_struct *p);
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
-#define TASK_STATE_MAX 2048
+#define TASK_NEW 2048
+#define TASK_STATE_MAX 4096
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -521,6 +522,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_REAPED 21 /* mm has been already reaped */
+#define MMF_OOM_NOT_REAPABLE 22 /* mm couldn't be reaped */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
@@ -1560,6 +1562,9 @@ struct task_struct {
/* unserialized, strictly 'current' */
unsigned in_execve:1; /* bit to tell LSMs we're in execve */
unsigned in_iowait:1;
+#if !defined(TIF_RESTORE_SIGMASK)
+ unsigned restore_sigmask:1;
+#endif
#ifdef CONFIG_MEMCG
unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
@@ -1953,10 +1958,6 @@ extern int arch_task_struct_size __read_mostly;
#endif
#ifdef CONFIG_SCHED_BFS
-bool grunqueue_is_locked(void);
-void grq_unlock_wait(void);
-void cpu_scaling(int cpu);
-void cpu_nonscaling(int cpu);
#define tsk_seruntime(t) ((t)->sched_time)
#define tsk_rttimeout(t) ((t)->rt_timeout)
@@ -1964,11 +1965,6 @@ static inline void tsk_cpus_current(struct task_struct *p)
{
}
-static inline int runqueue_is_locked(int cpu)
-{
- return grunqueue_is_locked();
-}
-
void print_scheduler_version(void);
static inline bool iso_task(struct task_struct *p)
@@ -1976,14 +1972,6 @@ static inline bool iso_task(struct task_struct *p)
return (p->policy == SCHED_ISO);
}
#else /* CFS */
-extern int runqueue_is_locked(int cpu);
-static inline void cpu_scaling(int cpu)
-{
-}
-
-static inline void cpu_nonscaling(int cpu)
-{
-}
#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
#define tsk_rttimeout(t) ((t)->rt.timeout)
@@ -2001,12 +1989,6 @@ static inline bool iso_task(struct task_struct *p)
{
return false;
}
-
-/* Anyone feel like implementing this? */
-static inline bool above_background_load(void)
-{
- return false;
-}
#endif /* CONFIG_SCHED_BFS */
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2023,6 +2005,32 @@ static inline int tsk_nr_cpus_allowed(struct task_struct *p)
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
+static inline bool in_vfork(struct task_struct *tsk)
+{
+ bool ret;
+
+ /*
+ * need RCU to access ->real_parent if CLONE_VM was used along with
+ * CLONE_PARENT.
+ *
+ * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
+ * imply CLONE_VM
+ *
+ * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
+ * ->real_parent is not necessarily the task doing vfork(), so in
+ * theory we can't rely on task_lock() if we want to dereference it.
+ *
+	 * And in this case we can't trust the real_parent->mm == tsk->mm
+	 * check: it can be a false negative. But we do not care; if init or
+	 * another oom-unkillable task does this it should blame itself.
+ */
+ rcu_read_lock();
+ ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+ rcu_read_unlock();
+
+ return ret;
+}
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
@@ -2214,6 +2222,9 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
+struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+struct task_struct *try_get_task_struct(struct task_struct **ptask);
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
@@ -2724,6 +2735,66 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+#ifdef TIF_RESTORE_SIGMASK
+/*
+ * Legacy restore_sigmask accessors. These are inefficient on
+ * SMP architectures because they require atomic operations.
+ */
+
+/**
+ * set_restore_sigmask() - make sure saved_sigmask processing gets done
+ *
+ * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
+ * will run before returning to user mode, to process the flag. For
+ * all callers, TIF_SIGPENDING is already set or it does no harm to set
+ * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
+ * arch code will notice on return to user mode, in case those bits
+ * are scarce. We set TIF_SIGPENDING here to ensure that the arch
+ * signal code always gets run when TIF_RESTORE_SIGMASK is set.
+ */
+static inline void set_restore_sigmask(void)
+{
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+ return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+
+#else /* TIF_RESTORE_SIGMASK */
+
+/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
+static inline void set_restore_sigmask(void)
+{
+ current->restore_sigmask = true;
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current->restore_sigmask = false;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current->restore_sigmask;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ if (!current->restore_sigmask)
+ return false;
+ current->restore_sigmask = false;
+ return true;
+}
+#endif
+
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
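Roughly how a sigsuspend-style path uses these helpers (a sketch modeled on kernel/signal.c, not a verbatim copy):

static long example_sigsuspend(sigset_t *newset)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(newset);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* the arch signal code restores ->saved_sigmask on the way out */
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}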
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 35de50a65..dc5f989be 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -70,6 +70,8 @@ struct scpi_ops {
int (*sensor_get_capability)(u16 *sensors);
int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
int (*sensor_get_value)(u16, u64 *);
+ int (*device_get_power_state)(u16);
+ int (*device_set_power_state)(u16, u8);
};
#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index de1f64318..fcb4c3646 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
-struct sctp_info {
- __u32 sctpi_tag;
- __u32 sctpi_state;
- __u32 sctpi_rwnd;
- __u16 sctpi_unackdata;
- __u16 sctpi_penddata;
- __u16 sctpi_instrms;
- __u16 sctpi_outstrms;
- __u32 sctpi_fragmentation_point;
- __u32 sctpi_inqueue;
- __u32 sctpi_outqueue;
- __u32 sctpi_overall_error;
- __u32 sctpi_max_burst;
- __u32 sctpi_maxseg;
- __u32 sctpi_peer_rwnd;
- __u32 sctpi_peer_tag;
- __u8 sctpi_peer_capable;
- __u8 sctpi_peer_sack;
- __u16 __reserved1;
-
- /* assoc status info */
- __u64 sctpi_isacks;
- __u64 sctpi_osacks;
- __u64 sctpi_opackets;
- __u64 sctpi_ipackets;
- __u64 sctpi_rtxchunks;
- __u64 sctpi_outofseqtsns;
- __u64 sctpi_idupchunks;
- __u64 sctpi_gapcnt;
- __u64 sctpi_ouodchunks;
- __u64 sctpi_iuodchunks;
- __u64 sctpi_oodchunks;
- __u64 sctpi_iodchunks;
- __u64 sctpi_octrlchunks;
- __u64 sctpi_ictrlchunks;
-
- /* primary transport info */
- struct sockaddr_storage sctpi_p_address;
- __s32 sctpi_p_state;
- __u32 sctpi_p_cwnd;
- __u32 sctpi_p_srtt;
- __u32 sctpi_p_rto;
- __u32 sctpi_p_hbinterval;
- __u32 sctpi_p_pathmaxrxt;
- __u32 sctpi_p_sackdelay;
- __u32 sctpi_p_sackfreq;
- __u32 sctpi_p_ssthresh;
- __u32 sctpi_p_partial_bytes_acked;
- __u32 sctpi_p_flight_size;
- __u16 sctpi_p_error;
- __u16 __reserved2;
-
- /* sctp sock info */
- __u32 sctpi_s_autoclose;
- __u32 sctpi_s_adaptation_ind;
- __u32 sctpi_s_pd_point;
- __u8 sctpi_s_nodelay;
- __u8 sctpi_s_disable_fragments;
- __u8 sctpi_s_v4mapped;
- __u8 sctpi_s_frag_interleave;
- __u32 sctpi_s_type;
- __u32 __reserved3;
-};
-
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 2296e6b2f..ecc296c13 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -28,19 +28,13 @@ struct seccomp {
};
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-extern int __secure_computing(void);
-static inline int secure_computing(void)
+extern int __secure_computing(const struct seccomp_data *sd);
+static inline int secure_computing(const struct seccomp_data *sd)
{
if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing();
+ return __secure_computing(sd);
return 0;
}
-
-#define SECCOMP_PHASE1_OK 0
-#define SECCOMP_PHASE1_SKIP 1
-
-extern u32 seccomp_phase1(struct seccomp_data *sd);
-int seccomp_phase2(u32 phase1_result);
#else
extern void secure_computing_strict(int this_syscall);
#endif
@@ -61,7 +55,7 @@ struct seccomp { };
struct seccomp_filter { };
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-static inline int secure_computing(void) { return 0; }
+static inline int secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
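Arch entry code now builds a single seccomp_data and passes it straight in; a sketch (populate_seccomp_data() stands in for whatever the arch uses to fill the struct):

static long example_syscall_entry(struct pt_regs *regs)
{
	struct seccomp_data sd;

	populate_seccomp_data(&sd);		/* hypothetical arch helper */
	if (secure_computing(&sd) == -1)
		return -1;			/* filter skipped the syscall */
	/* ... continue with ptrace/audit handling ... */
	return 0;
}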
diff --git a/include/linux/security.h b/include/linux/security.h
index 14df373ff..7831cd57b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -240,7 +240,7 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb);
int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_dentry_init_security(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen);
int security_inode_alloc(struct inode *inode);
@@ -591,7 +591,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
- struct qstr *name,
+ const struct qstr *name,
void **ctx,
u32 *ctxlen)
{
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a3d7c0d4a..2f44e2013 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -352,9 +352,15 @@ struct earlycon_id {
extern const struct earlycon_id __earlycon_table[];
extern const struct earlycon_id __earlycon_table_end[];
+#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
+#define EARLYCON_USED_OR_UNUSED __used
+#else
+#define EARLYCON_USED_OR_UNUSED __maybe_unused
+#endif
+
#define OF_EARLYCON_DECLARE(_name, compat, fn) \
static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
- __used __section(__earlycon_table) \
+ EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
= { .name = __stringify(_name), \
.compatible = compat, \
.setup = fn }
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index d9b436f09..e0e1597ef 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -156,6 +156,7 @@ struct sfi_device_table_entry {
#define SFI_DEV_TYPE_UART 2
#define SFI_DEV_TYPE_HSI 3
#define SFI_DEV_TYPE_IPC 4
+#define SFI_DEV_TYPE_SD 5
u8 host_num; /* attached to host 0, 1...*/
u16 addr;
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 665cd8591..ff078e704 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -16,8 +16,9 @@ struct shmem_inode_info {
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- struct shared_policy policy; /* NUMA memory alloc policy */
+ struct list_head shrinklist; /* shrinkable hpage inodes */
struct list_head swaplist; /* chain of maybes on swap */
+ struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
struct inode vfs_inode;
};
@@ -28,10 +29,14 @@ struct shmem_sb_info {
unsigned long max_inodes; /* How many inodes are allowed */
unsigned long free_inodes; /* How many are left for allocation */
spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ umode_t mode; /* Mount mode for root directory */
+ unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
- umode_t mode; /* Mount mode for root directory */
struct mempolicy *mpol; /* default memory policy for mappings */
+ spinlock_t shrinklist_lock; /* Protects shrinklist */
+ struct list_head shrinklist; /* List of shrinkable inodes */
+ unsigned long shrinklist_len; /* Length of shrinklist */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
@@ -45,11 +50,12 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
- loff_t size, unsigned long flags,
- int atomic_copy);
+ loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
- unsigned long flags, int atomic_copy);
+ unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
+extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
extern bool shmem_mapping(struct address_space *mapping);
extern void shmem_unlock_mapping(struct address_space *mapping);
@@ -62,6 +68,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
+/* Flag allocation requirements to shmem_getpage */
+enum sgp_type {
+ SGP_READ, /* don't exceed i_size, don't allocate page */
+ SGP_CACHE, /* don't exceed i_size, may allocate page */
+ SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
+ SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
+ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
+ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
+};
+
+extern int shmem_getpage(struct inode *inode, pgoff_t index,
+ struct page **pagep, enum sgp_type sgp);
+
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
{
@@ -69,6 +88,18 @@ static inline struct page *shmem_read_mapping_page(
mapping_gfp_mask(mapping));
}
+static inline bool shmem_file(struct file *file)
+{
+ if (!IS_ENABLED(CONFIG_SHMEM))
+ return false;
+ if (!file || !file->f_mapping)
+ return false;
+ return shmem_mapping(file->f_mapping);
+}
+
+extern bool shmem_charge(struct inode *inode, long pages);
+extern void shmem_uncharge(struct inode *inode, long pages);
+
#ifdef CONFIG_TMPFS
extern int shmem_add_seals(struct file *file, unsigned int seals);
@@ -84,4 +115,13 @@ static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a)
#endif
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+#else
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+ return false;
+}
+#endif
+
#endif
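For orientation, a kernel-internal usage sketch (assumes CONFIG_SHMEM; error handling abbreviated):

struct file *filp = shmem_file_setup("example", PAGE_SIZE, 0);
struct page *page;

if (IS_ERR(filp))
	return PTR_ERR(filp);

if (shmem_file(filp)) {			/* true for tmpfs-backed files */
	page = shmem_read_mapping_page(filp->f_mapping, 0);
	if (!IS_ERR(page))
		put_page(page);		/* drop the reference we were given */
}
fput(filp);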
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
new file mode 100644
index 000000000..f4dfade42
--- /dev/null
+++ b/include/linux/skb_array.h
@@ -0,0 +1,178 @@
+/*
+ * Definitions for the 'struct skb_array' data structure.
+ *
+ * Author:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Limited-size FIFO of skbs. Can be used more or less whenever
+ * sk_buff_head can be used, except you need to know the queue size in
+ * advance.
+ * Implemented as a type-safe wrapper around ptr_ring.
+ */
+
+#ifndef _LINUX_SKB_ARRAY_H
+#define _LINUX_SKB_ARRAY_H 1
+
+#ifdef __KERNEL__
+#include <linux/ptr_ring.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#endif
+
+struct skb_array {
+ struct ptr_ring ring;
+};
+
+/* Might be slightly faster than skb_array_full below, but callers invoking
+ * this in a loop must use a compiler barrier, for example cpu_relax().
+ */
+static inline bool __skb_array_full(struct skb_array *a)
+{
+ return __ptr_ring_full(&a->ring);
+}
+
+static inline bool skb_array_full(struct skb_array *a)
+{
+ return ptr_ring_full(&a->ring);
+}
+
+static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
+{
+ return ptr_ring_produce(&a->ring, skb);
+}
+
+static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
+{
+ return ptr_ring_produce_irq(&a->ring, skb);
+}
+
+static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
+{
+ return ptr_ring_produce_bh(&a->ring, skb);
+}
+
+static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
+{
+ return ptr_ring_produce_any(&a->ring, skb);
+}
+
+/* Might be slightly faster than skb_array_empty below, but only safe if the
+ * array is never resized. Also, callers invoking this in a loop must take care
+ * to use a compiler barrier, for example cpu_relax().
+ */
+static inline bool __skb_array_empty(struct skb_array *a)
+{
+ return !__ptr_ring_peek(&a->ring);
+}
+
+static inline bool skb_array_empty(struct skb_array *a)
+{
+ return ptr_ring_empty(&a->ring);
+}
+
+static inline bool skb_array_empty_bh(struct skb_array *a)
+{
+ return ptr_ring_empty_bh(&a->ring);
+}
+
+static inline bool skb_array_empty_irq(struct skb_array *a)
+{
+ return ptr_ring_empty_irq(&a->ring);
+}
+
+static inline bool skb_array_empty_any(struct skb_array *a)
+{
+ return ptr_ring_empty_any(&a->ring);
+}
+
+static inline struct sk_buff *skb_array_consume(struct skb_array *a)
+{
+ return ptr_ring_consume(&a->ring);
+}
+
+static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
+{
+ return ptr_ring_consume_irq(&a->ring);
+}
+
+static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
+{
+ return ptr_ring_consume_any(&a->ring);
+}
+
+static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
+{
+ return ptr_ring_consume_bh(&a->ring);
+}
+
+static inline int __skb_array_len_with_tag(struct sk_buff *skb)
+{
+ if (likely(skb)) {
+ int len = skb->len;
+
+ if (skb_vlan_tag_present(skb))
+ len += VLAN_HLEN;
+
+ return len;
+ } else {
+ return 0;
+ }
+}
+
+static inline int skb_array_peek_len(struct skb_array *a)
+{
+ return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
+}
+
+static inline int skb_array_peek_len_irq(struct skb_array *a)
+{
+ return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
+}
+
+static inline int skb_array_peek_len_bh(struct skb_array *a)
+{
+ return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
+}
+
+static inline int skb_array_peek_len_any(struct skb_array *a)
+{
+ return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
+}
+
+static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
+{
+ return ptr_ring_init(&a->ring, size, gfp);
+}
+
+static void __skb_array_destroy_skb(void *ptr)
+{
+ kfree_skb(ptr);
+}
+
+static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+{
+ return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
+}
+
+static inline int skb_array_resize_multiple(struct skb_array **rings,
+ int nrings, int size, gfp_t gfp)
+{
+ BUILD_BUG_ON(offsetof(struct skb_array, ring));
+ return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+ nrings, size, gfp,
+ __skb_array_destroy_skb);
+}
+
+static inline void skb_array_cleanup(struct skb_array *a)
+{
+ ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
+}
+
+#endif /* _LINUX_SKB_ARRAY_H */
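A minimal usage sketch (single producer and consumer; the ring size must be known up front):

struct skb_array q;

if (skb_array_init(&q, 256, GFP_KERNEL))
	return -ENOMEM;

if (skb_array_produce(&q, skb))		/* non-zero means the ring is full */
	kfree_skb(skb);

skb = skb_array_consume(&q);		/* NULL when empty */
if (skb)
	netif_receive_skb(skb);

skb_array_cleanup(&q);			/* frees any skbs still queued */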
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f39b37180..0f665cb26 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
+#include <linux/if_packet.h>
#include <net/flow.h>
/* The interface for checksum offload between the stack and networking drivers
@@ -301,6 +302,11 @@ struct sk_buff;
#endif
extern int sysctl_max_skb_frags;
+/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
+ * segment using its current segmentation instead.
+ */
+#define GSO_BY_FRAGS 0xFFFF
+
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
@@ -482,6 +488,8 @@ enum {
SKB_GSO_PARTIAL = 1 << 13,
SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
+
+ SKB_GSO_SCTP = 1 << 15,
};
#if BITS_PER_LONG > 32
@@ -874,6 +882,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
return (struct rtable *)skb_dst(skb);
}
+/* For mangling skb->pkt_type from the user space side, e.g. from
+ * applications such as nft or tc, we only allow a conservative subset
+ * of possible pkt_types to be set.
+ */
+static inline bool skb_pkt_type_ok(u32 ptype)
+{
+ return ptype <= PACKET_OTHERHOST;
+}
+
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
@@ -2830,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
__skb_linearize(skb) : 0;
}
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_sub(skb->csum,
+ csum_partial(start, len, 0), off);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
@@ -2840,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
-
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
- else if (skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_start_offset(skb) < 0)
- skb->ip_summed = CHECKSUM_NONE;
+ __skb_postpull_rcsum(skb, start, len, 0);
}
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_add(skb->csum,
+ csum_partial(start, len, 0), off);
+}
+/**
+ * skb_postpush_rcsum - update checksum for received skb after push
+ * @skb: buffer to update
+ * @start: start of data after push
+ * @len: length of data pushed
+ *
+ * After doing a push on a received packet, you need to call this to
+ * update the CHECKSUM_COMPLETE checksum.
+ */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- /* For performing the reverse operation to skb_postpull_rcsum(),
- * we can instead of ...
- *
- * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
- *
- * ... just use this equivalent version here to save a few
- * instructions. Feeding csum of 0 in csum_partial() and later
- * on adding skb->csum is equivalent to feed skb->csum in the
- * first place.
- */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_partial(start, len, skb->csum);
+ __skb_postpush_rcsum(skb, start, len, 0);
}
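The canonical pairing with skb_push(), e.g. when re-inserting an Ethernet header whose bytes are already in place (ETH_HLEN from linux/if_ether.h):

skb_push(skb, ETH_HLEN);
skb_postpush_rcsum(skb, skb->data, ETH_HLEN);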
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
/**
* skb_push_rcsum - push skb and update receive checksum
* @skb: buffer to update
@@ -3007,6 +3038,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index aeb3e6d00..4293808d8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
@@ -565,6 +577,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > SIZE_MAX / size)
return NULL;
+ if (__builtin_constant_p(n) && __builtin_constant_p(size))
+ return kmalloc(n * size, flags);
return __kmalloc(n * size, flags);
}
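With both arguments compile-time constants the overflow check and multiply fold away, so the call can resolve to a sized kmalloc cache, e.g.:

u32 *tbl = kmalloc_array(16, sizeof(*tbl), GFP_KERNEL);	/* folds to kmalloc(64, ...) */

if (!tbl)
	return -ENOMEM;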
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8694f7a5d..4ad2c5a26 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -81,14 +81,15 @@ struct kmem_cache {
#endif
#ifdef CONFIG_SLAB_FREELIST_RANDOM
- void *random_seq;
+ unsigned int *random_seq;
#endif
struct kmem_cache_node *node[MAX_NUMNODES];
};
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
- void *x) {
+ void *x)
+{
void *object = x - (x - page->s_mem) % cache->size;
void *last_object = page->s_mem + (cache->num - 1) * cache->size;
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d1faa019c..75f56c2ef 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,6 +99,15 @@ struct kmem_cache {
*/
int remote_node_defrag_ratio;
#endif
+
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ unsigned int *random_seq;
+#endif
+
+#ifdef CONFIG_KASAN
+ struct kasan_cache kasan_info;
+#endif
+
struct kmem_cache_node *node[MAX_NUMNODES];
};
@@ -114,15 +123,17 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason);
+void *fixup_red_left(struct kmem_cache *s, void *p);
+
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
void *x) {
void *object = x - (x - page_address(page)) % cache->size;
void *last_object = page_address(page) +
(page->objects - 1) * cache->size;
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
+ void *result = (unlikely(object > last_object)) ? last_object : object;
+
+ result = fixup_red_left(cache, result);
+ return result;
}
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c4414074b..eccae4690 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
void smp_setup_processor_id(void);
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
new file mode 100644
index 000000000..a37bc5538
--- /dev/null
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -0,0 +1,8 @@
+#ifndef __WCNSS_CTRL_H__
+#define __WCNSS_CTRL_H__
+
+#include <linux/soc/qcom/smd.h>
+
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+
+#endif
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
index 92fc613ab..7b8b280c1 100644
--- a/include/linux/soc/renesas/rcar-sysc.h
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -11,6 +11,6 @@ struct rcar_sysc_ch {
int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
-void __iomem *rcar_sysc_init(phys_addr_t base);
+void rcar_sysc_init(phys_addr_t base, u32 syscier);
#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 1f03483f6..072cb2aa2 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -312,8 +312,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @flags: other constraints relevant to this driver
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+ * @io_mutex: mutex for physical bus access
* @bus_lock_spinlock: spinlock for SPI bus locking
- * @bus_lock_mutex: mutex for SPI bus locking
+ * @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
@@ -446,6 +447,9 @@ struct spi_master {
*/
size_t (*max_transfer_size)(struct spi_device *spi);
+ /* I/O mutex */
+ struct mutex io_mutex;
+
/* lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
@@ -1143,6 +1147,8 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* @opcode_nbits: number of lines to send opcode
* @addr_nbits: number of lines to send address
* @data_nbits: number of lines for data
+ * @rx_sg: Scatterlist for receive data read from flash
+ * @cur_msg_mapped: message has been mapped for DMA
*/
struct spi_flash_read_message {
void *buf;
@@ -1155,6 +1161,8 @@ struct spi_flash_read_message {
u8 opcode_nbits;
u8 addr_nbits;
u8 data_nbits;
+ struct sg_table rx_sg;
+ bool cur_msg_mapped;
};
/* SPI core interface for flash read support */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 8b3ac0d71..0d9848de6 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -6,6 +6,7 @@
#endif
#include <asm/processor.h> /* for cpu_relax() */
+#include <asm/barrier.h>
/*
* include/linux/spinlock_up.h - UP-debug version of spinlocks.
@@ -25,6 +26,11 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x) ((x)->slock == 0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->slock, VAL);
+}
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
@@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
@@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_read_can_lock(lock) (((void)(lock), 1))
#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define arch_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (arch_spin_is_locked(lock))
-
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
new file mode 100644
index 000000000..6780fdb0a
--- /dev/null
+++ b/include/linux/sradix-tree.h
@@ -0,0 +1,77 @@
+#ifndef _LINUX_SRADIX_TREE_H
+#define _LINUX_SRADIX_TREE_H
+
+
+#define INIT_SRADIX_TREE(root, mask) \
+do { \
+ (root)->height = 0; \
+ (root)->gfp_mask = (mask); \
+ (root)->rnode = NULL; \
+} while (0)
+
+#define ULONG_BITS (sizeof(unsigned long) * 8)
+#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+//#define SRADIX_TREE_MAP_SHIFT 6
+//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
+//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
+
+struct sradix_tree_node {
+ unsigned int height; /* Height from the bottom */
+ unsigned int count;
+ unsigned int fulls; /* Number of full sublevel trees */
+ struct sradix_tree_node *parent;
+ void *stores[0];
+};
+
+/* A simple radix tree implementation */
+struct sradix_tree_root {
+ unsigned int height;
+ struct sradix_tree_node *rnode;
+
+ /* The node last found to have empty stores available in its sublevels */
+ struct sradix_tree_node *enter_node;
+ unsigned int shift;
+ unsigned int stores_size;
+ unsigned int mask;
+ unsigned long min; /* The first hole index */
+ unsigned long num;
+ //unsigned long *height_to_maxindex;
+
+ /* How the node is allocated and freed. */
+ struct sradix_tree_node *(*alloc)(void);
+ void (*free)(struct sradix_tree_node *node);
+
+ /* When a new node is added and removed */
+ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
+ void (*assign)(struct sradix_tree_node *node, unsigned index, void *item);
+ void (*rm)(struct sradix_tree_node *node, unsigned offset);
+};
+
+struct sradix_tree_path {
+ struct sradix_tree_node *node;
+ int offset;
+};
+
+static inline
+void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
+{
+ root->height = 0;
+ root->rnode = NULL;
+ root->shift = shift;
+ root->stores_size = 1UL << shift;
+ root->mask = root->stores_size - 1;
+}
+
+
+extern void *sradix_tree_next(struct sradix_tree_root *root,
+ struct sradix_tree_node *node, unsigned long index,
+ int (*iter)(void *, unsigned long));
+
+extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
+
+extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
+ struct sradix_tree_node *node, unsigned long index);
+
+extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
+
+#endif /* _LINUX_SRADIX_TREE_H */
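A hedged setup sketch (my_node_alloc()/my_node_free() are hypothetical; whether the extend/assign/rm hooks may stay NULL depends on the implementation):

static struct sradix_tree_node *my_node_alloc(void);		/* hypothetical */
static void my_node_free(struct sradix_tree_node *node);	/* hypothetical */

struct sradix_tree_root root;

init_sradix_tree_root(&root, 6);	/* 2^6 = 64 stores per node */
root.alloc = my_node_alloc;
root.free = my_node_free;
/* set .extend/.assign/.rm as needed before calling sradix_tree_enter() */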
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index ffdaca9c0..705840e04 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -135,9 +135,12 @@ struct plat_stmmacenet_data {
void (*bus_setup)(void __iomem *ioaddr);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
+ void (*suspend)(struct platform_device *pdev, void *priv);
+ void (*resume)(struct platform_device *pdev, void *priv);
void *bsp_priv;
struct stmmac_axi *axi;
int has_gmac4;
bool tso_en;
+ int mac_port_sel_speed;
};
#endif
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
index 451771d9b..7c2d95170 100644
--- a/include/linux/stringhash.h
+++ b/include/linux/stringhash.h
@@ -3,6 +3,7 @@
#include <linux/compiler.h> /* For __pure */
#include <linux/types.h> /* For u32, u64 */
+#include <linux/hash.h>
/*
* Routines for hashing strings of bytes to a 32-bit hash value.
@@ -34,7 +35,7 @@
*/
/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash() 0
+#define init_name_hash(salt) (unsigned long)(salt)
/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
@@ -45,11 +46,12 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
/*
* Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
+ * losing bits). This also has the property (wanted by the dcache)
+ * that the msbits make a good hash table index.
*/
static inline unsigned long end_name_hash(unsigned long hash)
{
- return (unsigned int)hash;
+ return __hash_32((unsigned int)hash);
}
/*
@@ -60,7 +62,7 @@ static inline unsigned long end_name_hash(unsigned long hash)
*
* If not set, this falls back to a wrapper around the preceding.
*/
-extern unsigned int __pure full_name_hash(const char *, unsigned int);
+extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int);
/*
* A hash_len is a u64 with the hash of a string in the low
@@ -71,6 +73,6 @@ extern unsigned int __pure full_name_hash(const char *, unsigned int);
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
/* Return the "hash_len" (hash and length) of a null-terminated string */
-extern u64 __pure hashlen_string(const char *name);
+extern u64 __pure hashlen_string(const void *salt, const char *name);
#endif /* __LINUX_STRINGHASH_H */
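Callers now thread a salt pointer through; a sketch of the dcache-style use (parent, name, and len are assumed locals):

/* salt with the parent dentry so equal names hash differently
 * under different directories */
unsigned int hash = full_name_hash(parent, name, len);
u64 hashlen = hashlen_string(parent, name);

/* a NULL salt keeps the old behaviour, as sunrpc's hash helpers do */
unsigned int legacy = full_name_hash(NULL, name, len);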
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 899791573..4ccf184e9 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -37,7 +37,6 @@ struct rpcsec_gss_info;
/* auth_cred ac_flags bits */
enum {
- RPC_CRED_NO_CRKEY_TIMEOUT = 0, /* underlying cred has no key timeout */
RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
RPC_CRED_NOTIFY_TIMEOUT = 2, /* notify generic cred when underlying
key will expire soon */
@@ -82,6 +81,9 @@ struct rpc_cred {
#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
+/* rpc_auth au_flags */
+#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */
+
/*
* Client authentication handle
*/
@@ -107,6 +109,9 @@ struct rpc_auth {
/* per-flavor data */
};
+/* rpc_auth au_flags */
+#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
const char *target_name;
@@ -196,7 +201,7 @@ void rpcauth_destroy_credcache(struct rpc_auth *);
void rpcauth_clear_credcache(struct rpc_cred_cache *);
int rpcauth_key_timeout_notify(struct rpc_auth *,
struct rpc_cred *);
-bool rpcauth_cred_key_to_expire(struct rpc_cred *);
+bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index ed03c9f7f..62a60eeac 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -78,8 +78,6 @@ struct cache_detail {
struct hlist_head * hash_table;
rwlock_t hash_lock;
- atomic_t inuse; /* active user-space update or lookup */
-
char *name;
void (*cache_put)(struct kref *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index b6810c92b..5c02b0691 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -195,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
struct rpc_xprt *,
void *),
void *data);
+void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+ unsigned long timeo);
const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 1f911ccb2..68ec78c1a 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -73,6 +73,7 @@ u32 gss_delete_sec_context(
rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop,
u32 service);
u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
+bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor);
char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
struct pf_desc {
@@ -81,6 +82,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ bool datatouch;
};
/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 05a1809c4..817af0b43 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -230,6 +230,10 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *,
+ bool (*)(struct rpc_task *, void *),
+ void *);
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
bool (*)(struct rpc_task *, void *),
void *);
@@ -247,6 +251,7 @@ void rpc_show_tasks(struct net *);
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
static inline int rpc_wait_for_completion_task(struct rpc_task *task)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7ca44fb5b..7321ae933 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -268,6 +268,7 @@ struct svc_rqst {
* cache pages */
#define RQ_VICTIM (5) /* about to be shut down */
#define RQ_BUSY (6) /* request is busy */
+#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
void * rq_argp; /* decoded arguments */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79ba50856..ab02a457d 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,7 +25,6 @@ struct svc_xprt_ops {
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
int (*xpo_secure_port)(struct svc_rqst *);
- void (*xpo_adjust_wspace)(struct svc_xprt *);
};
struct svc_xprt_class {
@@ -69,6 +68,7 @@ struct svc_xprt {
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
+ atomic_t xpt_nr_rqsts; /* Number of requests */
struct mutex xpt_mutex; /* to serialize sending data */
spinlock_t xpt_lock; /* protects sk_deferred
* and xpt_auth_cache */
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 91d5a5d6f..d03932055 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -172,12 +172,12 @@ extern void unix_gid_cache_destroy(struct net *net);
*/
static inline unsigned long hash_str(char const *name, int bits)
{
- return hashlen_hash(hashlen_string(name)) >> (32 - bits);
+ return hashlen_hash(hashlen_string(NULL, name)) >> (32 - bits);
}
static inline unsigned long hash_mem(char const *buf, int length, int bits)
{
- return full_name_hash(buf, length) >> (32 - bits);
+ return full_name_hash(NULL, buf, length) >> (32 - bits);
}
#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5e3e1b63d..a16070dd0 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -218,7 +218,8 @@ struct rpc_xprt {
struct work_struct task_cleanup;
struct timer_list timer;
unsigned long last_used,
- idle_timeout;
+ idle_timeout,
+ max_reconnect_timeout;
/*
* Send stuff
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 0ece4ba06..bef3fb0ab 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -80,6 +80,7 @@ struct sock_xprt {
#define TCP_RPC_REPLY (1UL << 6)
#define XPRT_SOCK_CONNECTING 1U
+#define XPRT_SOCK_DATA_READY (2)
#endif /* __KERNEL__ */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0e527a70e..7693e39b1 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch)
#endif
#ifdef CONFIG_VT_CONSOLE_SLEEP
-extern int pm_prepare_console(void);
+extern void pm_prepare_console(void);
extern void pm_restore_console(void);
#else
-static inline int pm_prepare_console(void)
+static inline void pm_prepare_console(void)
{
- return 0;
}
static inline void pm_restore_console(void)
@@ -491,74 +490,6 @@ extern bool pm_print_times_enabled;
#define pm_print_times_enabled (false)
#endif
-enum {
- TOI_CAN_HIBERNATE,
- TOI_CAN_RESUME,
- TOI_RESUME_DEVICE_OK,
- TOI_NORESUME_SPECIFIED,
- TOI_SANITY_CHECK_PROMPT,
- TOI_CONTINUE_REQ,
- TOI_RESUMED_BEFORE,
- TOI_BOOT_TIME,
- TOI_NOW_RESUMING,
- TOI_IGNORE_LOGLEVEL,
- TOI_TRYING_TO_RESUME,
- TOI_LOADING_ALT_IMAGE,
- TOI_STOP_RESUME,
- TOI_IO_STOPPED,
- TOI_NOTIFIERS_PREPARE,
- TOI_CLUSTER_MODE,
- TOI_BOOT_KERNEL,
- TOI_DEVICE_HOTPLUG_LOCKED,
-};
-
-#ifdef CONFIG_TOI
-
-/* Used in init dir files */
-extern unsigned long toi_state;
-#define set_toi_state(bit) (set_bit(bit, &toi_state))
-#define clear_toi_state(bit) (clear_bit(bit, &toi_state))
-#define test_toi_state(bit) (test_bit(bit, &toi_state))
-extern int toi_running;
-
-#define test_action_state(bit) (test_bit(bit, &toi_bkd.toi_action))
-extern int try_tuxonice_hibernate(void);
-
-#else /* !CONFIG_TOI */
-
-#define toi_state (0)
-#define set_toi_state(bit) do { } while (0)
-#define clear_toi_state(bit) do { } while (0)
-#define test_toi_state(bit) (0)
-#define toi_running (0)
-
-static inline int try_tuxonice_hibernate(void) { return 0; }
-#define test_action_state(bit) (0)
-
-#endif /* CONFIG_TOI */
-
-#ifdef CONFIG_HIBERNATION
-#ifdef CONFIG_TOI
-extern void try_tuxonice_resume(void);
-#else
-#define try_tuxonice_resume() do { } while (0)
-#endif
-
-extern int resume_attempted;
-extern int software_resume(void);
-
-static inline void check_resume_attempted(void)
-{
- if (resume_attempted)
- return;
-
- software_resume();
-}
-#else
-#define check_resume_attempted() do { } while (0)
-#define resume_attempted (0)
-#endif
-
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ed2696a09..e1d761463 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,15 +157,6 @@ enum {
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
-/*
- * Ratio between zone->managed_pages and the "gap" that above the per-zone
- * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that
- * do not meet the (high_wmark + gap) watermark, even which already met the
- * high_wmark, in order to provide better per-zone lru behavior. We are ok to
- * spend not more than 1% of the memory for this zone balancing "gap".
- */
-#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
-
#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
@@ -266,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
+ VM_WARN_ON_ONCE(!workingset_node_pages(node));
node->count--;
}
@@ -281,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
+ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
@@ -288,7 +281,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
-extern unsigned long nr_unallocated_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);
/* Definition of global_page_state not available yet */
@@ -318,6 +310,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
+extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
@@ -325,25 +318,24 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
bool may_swap);
-extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
- struct zone *zone,
+ pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
-extern unsigned long shrink_memory_mask(unsigned long nr_to_reclaim,
- gfp_t mask);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
#ifdef CONFIG_NUMA
-extern int zone_reclaim_mode;
+extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
+extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
-#define zone_reclaim_mode 0
-static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+#define node_reclaim_mode 0
+static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
+ unsigned int order)
{
return 0;
}
@@ -417,18 +409,14 @@ extern void swapcache_free(swp_entry_t);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_entry(swp_entry_t entry, struct block_device **);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-extern void get_swap_range_of_type(int type, swp_entry_t *start,
- swp_entry_t *end, unsigned int limit);
#else /* CONFIG_SWAP */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 017fced60..5f81f8a18 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
struct device;
-struct dma_attrs;
struct page;
struct scatterlist;
@@ -68,10 +67,10 @@ swiotlb_free_coherent(struct device *hwdev, size_t size,
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -83,12 +82,13 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir,
+ unsigned long attrs);
extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index fa7bc2992..a4f7203a9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -28,6 +28,7 @@
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
+struct completion;
struct ctl_table;
struct nsproxy;
struct ctl_table_root;
@@ -41,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
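
proc_douintvec() is the unsigned counterpart of proc_dointvec(): it parses an unsigned int and rejects negative input rather than silently wrapping it. A hedged sketch of a sysctl entry wired to it (variable and table names hypothetical):

	static unsigned int example_threshold;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_threshold",
			.data		= &example_threshold,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_douintvec,
		},
		{ }
	};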
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index b92944c55..2b5b10eed 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -57,9 +57,9 @@ extern long do_no_restart_syscall(struct restart_block *parm);
#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
- ___GFP_TOI_NOTRACK | __GFP_ZERO)
+ __GFP_ZERO)
#else
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | ___GFP_TOI_NOTRACK | __GFP_NOTRACK)
+# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
#endif
/*
@@ -105,46 +105,30 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
-/*
- * An arch can define its own version of set_restore_sigmask() to get the
- * job done however works, with or without TIF_RESTORE_SIGMASK.
- */
-#define HAVE_SET_RESTORE_SIGMASK 1
-
-/**
- * set_restore_sigmask() - make sure saved_sigmask processing gets done
- *
- * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
- * will run before returning to user mode, to process the flag. For
- * all callers, TIF_SIGPENDING is already set or it's no harm to set
- * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
- * arch code will notice on return to user mode, in case those bits
- * are scarce. We set TIF_SIGPENDING here to ensure that the arch
- * signal code always gets run when TIF_RESTORE_SIGMASK is set.
- */
-static inline void set_restore_sigmask(void)
-{
- set_thread_flag(TIF_RESTORE_SIGMASK);
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
-}
-static inline void clear_restore_sigmask(void)
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
{
- clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return 0;
}
-static inline bool test_restore_sigmask(void)
-{
- return test_thread_flag(TIF_RESTORE_SIGMASK);
-}
-static inline bool test_and_clear_restore_sigmask(void)
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
{
- return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+ if (!__builtin_constant_p(n))
+ __check_object_size(ptr, n, to_user);
}
-#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
-
-#ifndef HAVE_SET_RESTORE_SIGMASK
-#error "no set_restore_sigmask() provided and default one won't work"
-#endif
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
#endif /* __KERNEL__ */
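
check_object_size() is the entry point for hardened usercopy: when the copy length is a compile-time constant it compiles away entirely, otherwise __check_object_size() validates the object against slab bounds, the stack frame (via arch_within_stack_frames() above), and kernel text. A simplified sketch of how a copy routine might call it (the raw copy primitive here is a placeholder, not a real API):

	static inline unsigned long
	example_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		/* No-op when __builtin_constant_p(n); full object check otherwise. */
		check_object_size(from, n, true);

		return example_arch_copy_to_user(to, from, n); /* placeholder */
	}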
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 0a0d56834..f2293028a 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -71,7 +71,7 @@ struct st_proto_s {
enum proto_type type;
long (*recv) (void *, struct sk_buff *);
unsigned char (*match_packet) (const unsigned char *data);
- void (*reg_complete_cb) (void *, char data);
+ void (*reg_complete_cb) (void *, int data);
long (*write) (struct sk_buff *skb);
void *priv_data;
diff --git a/include/linux/time.h b/include/linux/time.h
index 297f09f23..4cea09d94 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -205,7 +205,20 @@ struct tm {
int tm_yday;
};
-void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
+
+/**
+ * time_to_tm - converts the calendar time to local broken-down time
+ *
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ * Coordinated Universal Time (UTC).
+ * @offset: offset seconds to add to totalsecs.
+ * @result: pointer to struct tm variable to receive broken-down time
+ */
+static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+{
+ time64_to_tm(totalsecs, offset, result);
+}
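
For illustration, breaking down the current wall-clock time with the 64-bit variant (ktime_get_real_seconds() returns a time64_t, so nothing is truncated on 32-bit):

	struct tm tm;

	/* Offset 0: broken-down time in UTC. */
	time64_to_tm(ktime_get_real_seconds(), 0, &tm);
	pr_info("year=%ld month=%d\n", tm.tm_year + 1900, tm.tm_mon + 1);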
/**
* timespec_to_ns - Convert timespec to nanoseconds
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 20ac746f3..51d601f19 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
void (*function)(unsigned long);
unsigned long data;
u32 flags;
- int slack;
#ifdef CONFIG_TIMER_STATS
int start_pid;
@@ -58,11 +57,14 @@ struct timer_list {
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_CPUMASK 0x0007FFFF
-#define TIMER_MIGRATING 0x00080000
+#define TIMER_CPUMASK 0x0003FFFF
+#define TIMER_MIGRATING 0x00040000
#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
-#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_DEFERRABLE 0x00080000
+#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
+#define TIMER_ARRAYSHIFT 22
+#define TIMER_ARRAYMASK 0xFFC00000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
@@ -70,7 +72,6 @@ struct timer_list {
.expires = (_expires), \
.data = (_data), \
.flags = (_flags), \
- .slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
}
@@ -78,9 +79,15 @@ struct timer_list {
#define TIMER_INITIALIZER(_function, _expires, _data) \
__TIMER_INITIALIZER((_function), (_expires), (_data), 0)
+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
+
#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
+
#define DEFINE_TIMER(_name, _function, _expires, _data) \
struct timer_list _name = \
TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define init_timer(timer) \
__init_timer((timer), 0)
+#define init_timer_pinned(timer) \
+ __init_timer((timer), TIMER_PINNED)
#define init_timer_deferrable(timer) \
__init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_pinned_deferrable(timer) \
+ __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
#define init_timer_on_stack(timer) \
__init_timer_on_stack((timer), 0)
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define setup_timer(timer, fn, data) \
__setup_timer((timer), (fn), (data), 0)
+#define setup_pinned_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), TIMER_PINNED)
#define setup_deferrable_timer(timer, fn, data) \
__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
#define setup_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_pinned_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
#define setup_deferrable_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
/**
* timer_pending - is a timer pending?
@@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
-
-extern void set_timer_slack(struct timer_list *time, int slack_hz);
-#define TIMER_NOT_PINNED 0
-#define TIMER_PINNED 1
/*
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
@@ -259,4 +273,10 @@ unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
+#ifdef CONFIG_HOTPLUG_CPU
+int timers_dead_cpu(unsigned int cpu);
+#else
+#define timers_dead_cpu NULL
+#endif
+
#endif
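
With mod_timer_pinned() and set_timer_slack() removed, pinning is now a static property of the timer set at init time via TIMER_PINNED, and a pinned timer is excluded from NOHZ migration to other CPUs. A hedged sketch of the replacement pattern (callback and names illustrative):

	static struct timer_list example_timer;

	static void example_timeout(unsigned long data)
	{
		pr_info("pinned timer fired on CPU %d\n", smp_processor_id());
	}

	static void example_arm(void)
	{
		/* TIMER_PINNED is set once here; no mod_timer_pinned() needed. */
		setup_pinned_timer(&example_timer, example_timeout, 0);
		example_timer.expires = jiffies + HZ;
		add_timer(&example_timer);	/* queued on this CPU and kept there */
	}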
diff --git a/include/linux/topology.h b/include/linux/topology.h
index afce69296..cb0775e1e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -54,7 +54,7 @@ int arch_update_cpu_topology(void);
/*
* If the distance between nodes in a system is larger than RECLAIM_DISTANCE
* (in whatever arch specific measurement units returned by node_distance())
- * and zone_reclaim_mode is enabled then the VM will only call zone_reclaim()
+ * and node_reclaim_mode is enabled then the VM will only call node_reclaim()
* on nodes within this distance.
*/
#define RECLAIM_DISTANCE 30
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7759fc3c6..6685a7373 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -50,6 +50,10 @@
do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
/* Definitions for online/offline exerciser. */
+bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
+ unsigned long *sum_offl, int *min_onl, int *max_onl);
+bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
+ unsigned long *sum_onl, int *min_onl, int *max_onl);
int torture_onoff_init(long ooholdoff, long oointerval);
void torture_onoff_stats(void);
bool torture_onoff_failures(void);
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 706e63eea..da158f06e 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -33,7 +33,12 @@ struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+enum TPM_OPS_FLAGS {
+ TPM_OPS_AUTO_STARTUP = BIT(0),
+};
+
struct tpm_class_ops {
+ unsigned int flags;
const u8 req_complete_mask;
const u8 req_complete_val;
bool (*req_canceled)(struct tpm_chip *chip, u8 status);
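
TPM_OPS_AUTO_STARTUP asks the core to issue the TPM 1.2 / TPM 2.0 startup sequence during chip registration instead of leaving it to the driver. A hedged sketch of a driver advertising the flag (all other ops elided):

	static const struct tpm_class_ops example_tpm_ops = {
		.flags = TPM_OPS_AUTO_STARTUP,	/* core performs TPM(2)_Startup */
		/* .send, .recv, .status, ... as usual for the driver */
	};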
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 349557825..f30c187ed 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
#endif /* __LINUX_UACCESS_H__ */
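
The unsafe_* accessors now take a label and jump to it on fault instead of returning an error, so a user_access_begin()/user_access_end() section can fall straight through on success. A minimal sketch under this era's access_ok() signature (names hypothetical):

	static int example_read_pair(const int __user *up, int *a, int *b)
	{
		if (!access_ok(VERIFY_READ, up, 2 * sizeof(int)))
			return -EFAULT;

		user_access_begin();
		unsafe_get_user(*a, &up[0], efault);	/* goto efault on fault */
		unsafe_get_user(*b, &up[1], efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}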
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index 03835522d..25e9d9216 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -177,12 +177,12 @@ static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
{
- return true;
+ return uid_valid(uid);
}
static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
{
- return true;
+ return gid_valid(gid);
}
#endif /* CONFIG_USER_NS */
diff --git a/include/linux/uksm.h b/include/linux/uksm.h
new file mode 100644
index 000000000..825f05e0e
--- /dev/null
+++ b/include/linux/uksm.h
@@ -0,0 +1,149 @@
+#ifndef __LINUX_UKSM_H
+#define __LINUX_UKSM_H
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork().
+ */
+
+/* If !CONFIG_UKSM, only the no-op stubs at the bottom of this file are used. */
+#ifdef CONFIG_UKSM
+
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/sched.h>
+
+extern unsigned long zero_pfn __read_mostly;
+extern unsigned long uksm_zero_pfn __read_mostly;
+extern struct page *empty_uksm_zero_page;
+
+/* must be called before the vma is linked into the mm */
+extern void uksm_vma_add_new(struct vm_area_struct *vma);
+extern void uksm_remove_vma(struct vm_area_struct *vma);
+
+#define UKSM_SLOT_NEED_SORT (1 << 0)
+#define UKSM_SLOT_NEED_RERAND (1 << 1)
+#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
+#define UKSM_SLOT_FUL_SCANNED (1 << 3)
+#define UKSM_SLOT_IN_UKSM (1 << 4)
+
+struct vma_slot {
+ struct sradix_tree_node *snode;
+ unsigned long sindex;
+
+ struct list_head slot_list;
+ unsigned long fully_scanned_round;
+ unsigned long dedup_num;
+ unsigned long pages_scanned;
+ unsigned long this_sampled;
+ unsigned long last_scanned;
+ unsigned long pages_to_scan;
+ struct scan_rung *rung;
+ struct page **rmap_list_pool;
+ unsigned int *pool_counts;
+ unsigned long pool_size;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long ctime_j;
+ unsigned long pages;
+ unsigned long flags;
+ unsigned long pages_cowed; /* pages cowed this round */
+ unsigned long pages_merged; /* pages merged this round */
+ unsigned long pages_bemerged;
+
+ /* when it has page merged in this eval round */
+ struct list_head dedup_list;
+};
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+ if (pte_pfn(pte) == uksm_zero_pfn)
+ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+ if (pte_pfn(pte) == uksm_zero_pfn)
+ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (vma->uksm_vma_slot && PageKsm(page))
+ vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
+ vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+#ifdef VM_SAO
+ if (vm_flags & VM_SAO)
+ return 0;
+#endif
+
+ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
+ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN));
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+ if (uksm_flags_can_scan(*vm_flags_p))
+ *vm_flags_p |= VM_MERGEABLE;
+}
+
+/*
+ * A wrapper around BUG_ON() for places where the KSM zero page must never
+ * appear. TODO: remove this once the uksm zero-page patch is stable enough.
+ */
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+ BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
+}
+#else
+static inline void uksm_vma_add_new(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_remove_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+ return 0;
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+}
+
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+}
+#endif /* !CONFIG_UKSM */
+#endif /* __LINUX_UKSM_H */
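
The hooks are intended to sit in the generic VMA lifecycle: uksm_vm_flags_mod() opts a scannable mapping in by setting VM_MERGEABLE, and uksm_vma_add_new() registers the slot before the VMA is linked into the mm. A hedged sketch of the intended call site (mm-internal details elided, names illustrative):

	static void example_attach(struct vm_area_struct *vma, unsigned long vm_flags)
	{
		uksm_vm_flags_mod(&vm_flags);	/* may set VM_MERGEABLE */
		vma->vm_flags = vm_flags;
		uksm_vma_add_new(vma);		/* before the vma is linked to the mm */
	}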
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index fefe8b06a..612dbdfa3 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -25,6 +25,8 @@
#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
+#define UDC_TRACE_STR_MAX 512
+
struct usb_ep;
/**
@@ -228,307 +230,49 @@ struct usb_ep {
/*-------------------------------------------------------------------------*/
-/**
- * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
- * @ep:the endpoint being configured
- * @maxpacket_limit:value of maximum packet size limit
- *
- * This function should be used only in UDC drivers to initialize endpoint
- * (usually in probe function).
- */
+#if IS_ENABLED(CONFIG_USB_GADGET)
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit);
+int usb_ep_enable(struct usb_ep *ep);
+int usb_ep_disable(struct usb_ep *ep);
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req);
+int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags);
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
+int usb_ep_set_halt(struct usb_ep *ep);
+int usb_ep_clear_halt(struct usb_ep *ep);
+int usb_ep_set_wedge(struct usb_ep *ep);
+int usb_ep_fifo_status(struct usb_ep *ep);
+void usb_ep_fifo_flush(struct usb_ep *ep);
+#else
static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
- unsigned maxpacket_limit)
-{
- ep->maxpacket_limit = maxpacket_limit;
- ep->maxpacket = maxpacket_limit;
-}
-
-/**
- * usb_ep_enable - configure endpoint, making it usable
- * @ep:the endpoint being configured. may not be the endpoint named "ep0".
- * drivers discover endpoints through the ep_list of a usb_gadget.
- *
- * When configurations are set, or when interface settings change, the driver
- * will enable or disable the relevant endpoints. while it is enabled, an
- * endpoint may be used for i/o until the driver receives a disconnect() from
- * the host or until the endpoint is disabled.
- *
- * the ep0 implementation (which calls this routine) must ensure that the
- * hardware capabilities of each endpoint match the descriptor provided
- * for it. for example, an endpoint named "ep2in-bulk" would be usable
- * for interrupt transfers as well as bulk, but it likely couldn't be used
- * for iso transfers or for endpoint 14. some endpoints are fully
- * configurable, with more generic names like "ep-a". (remember that for
- * USB, "in" means "towards the USB master".)
- *
- * returns zero, or a negative error code.
- */
+ unsigned maxpacket_limit)
+{ }
static inline int usb_ep_enable(struct usb_ep *ep)
-{
- int ret;
-
- if (ep->enabled)
- return 0;
-
- ret = ep->ops->enable(ep, ep->desc);
- if (ret)
- return ret;
-
- ep->enabled = true;
-
- return 0;
-}
-
-/**
- * usb_ep_disable - endpoint is no longer usable
- * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
- *
- * no other task may be using this endpoint when this is called.
- * any pending and uncompleted requests will complete with status
- * indicating disconnect (-ESHUTDOWN) before this call returns.
- * gadget drivers must call usb_ep_enable() again before queueing
- * requests to the endpoint.
- *
- * returns zero, or a negative error code.
- */
+{ return 0; }
static inline int usb_ep_disable(struct usb_ep *ep)
-{
- int ret;
-
- if (!ep->enabled)
- return 0;
-
- ret = ep->ops->disable(ep);
- if (ret)
- return ret;
-
- ep->enabled = false;
-
- return 0;
-}
-
-/**
- * usb_ep_alloc_request - allocate a request object to use with this endpoint
- * @ep:the endpoint to be used with with the request
- * @gfp_flags:GFP_* flags to use
- *
- * Request objects must be allocated with this call, since they normally
- * need controller-specific setup and may even need endpoint-specific
- * resources such as allocation of DMA descriptors.
- * Requests may be submitted with usb_ep_queue(), and receive a single
- * completion callback. Free requests with usb_ep_free_request(), when
- * they are no longer needed.
- *
- * Returns the request, or null if one could not be allocated.
- */
+{ return 0; }
static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
- gfp_t gfp_flags)
-{
- return ep->ops->alloc_request(ep, gfp_flags);
-}
-
-/**
- * usb_ep_free_request - frees a request object
- * @ep:the endpoint associated with the request
- * @req:the request being freed
- *
- * Reverses the effect of usb_ep_alloc_request().
- * Caller guarantees the request is not queued, and that it will
- * no longer be requeued (or otherwise used).
- */
+ gfp_t gfp_flags)
+{ return NULL; }
static inline void usb_ep_free_request(struct usb_ep *ep,
- struct usb_request *req)
-{
- ep->ops->free_request(ep, req);
-}
-
-/**
- * usb_ep_queue - queues (submits) an I/O request to an endpoint.
- * @ep:the endpoint associated with the request
- * @req:the request being submitted
- * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
- * pre-allocate all necessary memory with the request.
- *
- * This tells the device controller to perform the specified request through
- * that endpoint (reading or writing a buffer). When the request completes,
- * including being canceled by usb_ep_dequeue(), the request's completion
- * routine is called to return the request to the driver. Any endpoint
- * (except control endpoints like ep0) may have more than one transfer
- * request queued; they complete in FIFO order. Once a gadget driver
- * submits a request, that request may not be examined or modified until it
- * is given back to that driver through the completion callback.
- *
- * Each request is turned into one or more packets. The controller driver
- * never merges adjacent requests into the same packet. OUT transfers
- * will sometimes use data that's already buffered in the hardware.
- * Drivers can rely on the fact that the first byte of the request's buffer
- * always corresponds to the first byte of some USB packet, for both
- * IN and OUT transfers.
- *
- * Bulk endpoints can queue any amount of data; the transfer is packetized
- * automatically. The last packet will be short if the request doesn't fill it
- * out completely. Zero length packets (ZLPs) should be avoided in portable
- * protocols since not all usb hardware can successfully handle zero length
- * packets. (ZLPs may be explicitly written, and may be implicitly written if
- * the request 'zero' flag is set.) Bulk endpoints may also be used
- * for interrupt transfers; but the reverse is not true, and some endpoints
- * won't support every interrupt transfer. (Such as 768 byte packets.)
- *
- * Interrupt-only endpoints are less functional than bulk endpoints, for
- * example by not supporting queueing or not handling buffers that are
- * larger than the endpoint's maxpacket size. They may also treat data
- * toggle differently.
- *
- * Control endpoints ... after getting a setup() callback, the driver queues
- * one response (even if it would be zero length). That enables the
- * status ack, after transferring data as specified in the response. Setup
- * functions may return negative error codes to generate protocol stalls.
- * (Note that some USB device controllers disallow protocol stall responses
- * in some cases.) When control responses are deferred (the response is
- * written after the setup callback returns), then usb_ep_set_halt() may be
- * used on ep0 to trigger protocol stalls. Depending on the controller,
- * it may not be possible to trigger a status-stage protocol stall when the
- * data stage is over, that is, from within the response's completion
- * routine.
- *
- * For periodic endpoints, like interrupt or isochronous ones, the usb host
- * arranges to poll once per interval, and the gadget driver usually will
- * have queued some data to transfer at that time.
- *
- * Returns zero, or a negative error code. Endpoints that are not enabled
- * report errors; errors will also be
- * reported when the usb peripheral is disconnected.
- */
-static inline int usb_ep_queue(struct usb_ep *ep,
- struct usb_request *req, gfp_t gfp_flags)
-{
- if (WARN_ON_ONCE(!ep->enabled && ep->address))
- return -ESHUTDOWN;
-
- return ep->ops->queue(ep, req, gfp_flags);
-}
-
-/**
- * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
- * @ep:the endpoint associated with the request
- * @req:the request being canceled
- *
- * If the request is still active on the endpoint, it is dequeued and its
- * completion routine is called (with status -ECONNRESET); else a negative
- * error code is returned. This is guaranteed to happen before the call to
- * usb_ep_dequeue() returns.
- *
- * Note that some hardware can't clear out write fifos (to unlink the request
- * at the head of the queue) except as part of disconnecting from usb. Such
- * restrictions prevent drivers from supporting configuration changes,
- * even to configuration zero (a "chapter 9" requirement).
- */
+ struct usb_request *req)
+{ }
+static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{ return 0; }
static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
-{
- return ep->ops->dequeue(ep, req);
-}
-
-/**
- * usb_ep_set_halt - sets the endpoint halt feature.
- * @ep: the non-isochronous endpoint being stalled
- *
- * Use this to stall an endpoint, perhaps as an error report.
- * Except for control endpoints,
- * the endpoint stays halted (will not stream any data) until the host
- * clears this feature; drivers may need to empty the endpoint's request
- * queue first, to make sure no inappropriate transfers happen.
- *
- * Note that while an endpoint CLEAR_FEATURE will be invisible to the
- * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
- * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
- * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
- *
- * Returns zero, or a negative error code. On success, this call sets
- * underlying hardware state that blocks data transfers.
- * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
- * transfer requests are still queued, or if the controller hardware
- * (usually a FIFO) still holds bytes that the host hasn't collected.
- */
+{ return 0; }
static inline int usb_ep_set_halt(struct usb_ep *ep)
-{
- return ep->ops->set_halt(ep, 1);
-}
-
-/**
- * usb_ep_clear_halt - clears endpoint halt, and resets toggle
- * @ep:the bulk or interrupt endpoint being reset
- *
- * Use this when responding to the standard usb "set interface" request,
- * for endpoints that aren't reconfigured, after clearing any other state
- * in the endpoint's i/o queue.
- *
- * Returns zero, or a negative error code. On success, this call clears
- * the underlying hardware state reflecting endpoint halt and data toggle.
- * Note that some hardware can't support this request (like pxa2xx_udc),
- * and accordingly can't correctly implement interface altsettings.
- */
+{ return 0; }
static inline int usb_ep_clear_halt(struct usb_ep *ep)
-{
- return ep->ops->set_halt(ep, 0);
-}
-
-/**
- * usb_ep_set_wedge - sets the halt feature and ignores clear requests
- * @ep: the endpoint being wedged
- *
- * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
- * requests. If the gadget driver clears the halt status, it will
- * automatically unwedge the endpoint.
- *
- * Returns zero on success, else negative errno.
- */
-static inline int
-usb_ep_set_wedge(struct usb_ep *ep)
-{
- if (ep->ops->set_wedge)
- return ep->ops->set_wedge(ep);
- else
- return ep->ops->set_halt(ep, 1);
-}
-
-/**
- * usb_ep_fifo_status - returns number of bytes in fifo, or error
- * @ep: the endpoint whose fifo status is being checked.
- *
- * FIFO endpoints may have "unclaimed data" in them in certain cases,
- * such as after aborted transfers. Hosts may not have collected all
- * the IN data written by the gadget driver (and reported by a request
- * completion). The gadget driver may not have collected all the data
- * written OUT to it by the host. Drivers that need precise handling for
- * fault reporting or recovery may need to use this call.
- *
- * This returns the number of such bytes in the fifo, or a negative
- * errno if the endpoint doesn't use a FIFO or doesn't support such
- * precise handling.
- */
+{ return 0; }
+static inline int usb_ep_set_wedge(struct usb_ep *ep)
+{ return 0; }
static inline int usb_ep_fifo_status(struct usb_ep *ep)
-{
- if (ep->ops->fifo_status)
- return ep->ops->fifo_status(ep);
- else
- return -EOPNOTSUPP;
-}
-
-/**
- * usb_ep_fifo_flush - flushes contents of a fifo
- * @ep: the endpoint whose fifo is being flushed.
- *
- * This call may be used to flush the "unclaimed data" that may exist in
- * an endpoint fifo after abnormal transaction terminations. The call
- * must never be used except when endpoint is not being used for any
- * protocol translation.
- */
+{ return 0; }
static inline void usb_ep_fifo_flush(struct usb_ep *ep)
-{
- if (ep->ops->fifo_flush)
- ep->ops->fifo_flush(ep);
-}
-
+{ }
+#endif /* CONFIG_USB_GADGET */
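
Only the implementations moved out of line; the calling sequence for gadget drivers is unchanged. As a reminder, the canonical endpoint I/O pattern looks roughly like this (error handling trimmed, names hypothetical):

	static void example_complete(struct usb_ep *ep, struct usb_request *req)
	{
		/* req->status: 0 on success, -ECONNRESET if dequeued, etc. */
	}

	static int example_submit(struct usb_ep *ep, void *buf, unsigned len)
	{
		struct usb_request *req;

		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return -ENOMEM;

		req->buf = buf;
		req->length = len;
		req->complete = example_complete;

		return usb_ep_queue(ep, req, GFP_ATOMIC);	/* completes in FIFO order */
	}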
/*-------------------------------------------------------------------------*/
@@ -582,6 +326,7 @@ struct usb_gadget_ops {
* @dev: Driver model state for this abstract device.
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
+ * @mA: last set mA value
* @otg_caps: OTG capabilities of this gadget.
* @sg_supported: true if we can handle scatter-gather
* @is_otg: True if the USB device port uses a Mini-AB jack, so that the
@@ -638,6 +383,7 @@ struct usb_gadget {
struct device dev;
unsigned out_epnum;
unsigned in_epnum;
+ unsigned mA;
struct usb_otg_caps *otg_caps;
unsigned sg_supported:1;
@@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g)
#endif
}
-/**
- * usb_gadget_frame_number - returns the current frame number
- * @gadget: controller that reports the frame number
- *
- * Returns the usb frame number, normally eleven bits from a SOF packet,
- * or negative errno if this device doesn't support this capability.
- */
-static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
-{
- return gadget->ops->get_frame(gadget);
-}
+/*-------------------------------------------------------------------------*/
-/**
- * usb_gadget_wakeup - tries to wake up the host connected to this gadget
- * @gadget: controller used to wake up the host
- *
- * Returns zero on success, else negative error code if the hardware
- * doesn't support such attempts, or its support has not been enabled
- * by the usb host. Drivers must return device descriptors that report
- * their ability to support this, or hosts won't enable it.
- *
- * This may also try to use SRP to wake the host and start enumeration,
- * even if OTG isn't otherwise in use. OTG devices may also start
- * remote wakeup even when hosts don't explicitly enable it.
- */
+#if IS_ENABLED(CONFIG_USB_GADGET)
+int usb_gadget_frame_number(struct usb_gadget *gadget);
+int usb_gadget_wakeup(struct usb_gadget *gadget);
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
+int usb_gadget_vbus_connect(struct usb_gadget *gadget);
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA);
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget);
+int usb_gadget_connect(struct usb_gadget *gadget);
+int usb_gadget_disconnect(struct usb_gadget *gadget);
+int usb_gadget_deactivate(struct usb_gadget *gadget);
+int usb_gadget_activate(struct usb_gadget *gadget);
+#else
+static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
+{ return 0; }
static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
-{
- if (!gadget->ops->wakeup)
- return -EOPNOTSUPP;
- return gadget->ops->wakeup(gadget);
-}
-
-/**
- * usb_gadget_set_selfpowered - sets the device selfpowered feature.
- * @gadget:the device being declared as self-powered
- *
- * this affects the device status reported by the hardware driver
- * to reflect that it now has a local power supply.
- *
- * returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
-{
- if (!gadget->ops->set_selfpowered)
- return -EOPNOTSUPP;
- return gadget->ops->set_selfpowered(gadget, 1);
-}
-
-/**
- * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
- * @gadget:the device being declared as bus-powered
- *
- * this affects the device status reported by the hardware driver.
- * some hardware may not support bus-powered operation, in which
- * case this feature's value can never change.
- *
- * returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
-{
- if (!gadget->ops->set_selfpowered)
- return -EOPNOTSUPP;
- return gadget->ops->set_selfpowered(gadget, 0);
-}
-
-/**
- * usb_gadget_vbus_connect - Notify controller that VBUS is powered
- * @gadget:The device which now has VBUS power.
- * Context: can sleep
- *
- * This call is used by a driver for an external transceiver (or GPIO)
- * that detects a VBUS power session starting. Common responses include
- * resuming the controller, activating the D+ (or D-) pullup to let the
- * host detect that a USB device is attached, and starting to draw power
- * (8mA or possibly more, especially after SET_CONFIGURATION).
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget)
-{
- if (!gadget->ops->vbus_session)
- return -EOPNOTSUPP;
- return gadget->ops->vbus_session(gadget, 1);
-}
-
-/**
- * usb_gadget_vbus_draw - constrain controller's VBUS power usage
- * @gadget:The device whose VBUS usage is being described
- * @mA:How much current to draw, in milliAmperes. This should be twice
- * the value listed in the configuration descriptor bMaxPower field.
- *
- * This call is used by gadget drivers during SET_CONFIGURATION calls,
- * reporting how much power the device may consume. For example, this
- * could affect how quickly batteries are recharged.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
- if (!gadget->ops->vbus_draw)
- return -EOPNOTSUPP;
- return gadget->ops->vbus_draw(gadget, mA);
-}
-
-/**
- * usb_gadget_vbus_disconnect - notify controller about VBUS session end
- * @gadget:the device whose VBUS supply is being described
- * Context: can sleep
- *
- * This call is used by a driver for an external transceiver (or GPIO)
- * that detects a VBUS power session ending. Common responses include
- * reversing everything done in usb_gadget_vbus_connect().
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
-{
- if (!gadget->ops->vbus_session)
- return -EOPNOTSUPP;
- return gadget->ops->vbus_session(gadget, 0);
-}
-
-/**
- * usb_gadget_connect - software-controlled connect to USB host
- * @gadget:the peripheral being connected
- *
- * Enables the D+ (or potentially D-) pullup. The host will start
- * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_connect(struct usb_gadget *gadget)
-{
- int ret;
-
- if (!gadget->ops->pullup)
- return -EOPNOTSUPP;
-
- if (gadget->deactivated) {
- /*
- * If gadget is deactivated we only save new state.
- * Gadget will be connected automatically after activation.
- */
- gadget->connected = true;
- return 0;
- }
-
- ret = gadget->ops->pullup(gadget, 1);
- if (!ret)
- gadget->connected = 1;
- return ret;
-}
-
-/**
- * usb_gadget_disconnect - software-controlled disconnect from USB host
- * @gadget:the peripheral being disconnected
- *
- * Disables the D+ (or potentially D-) pullup, which the host may see
- * as a disconnect (when a VBUS session is active). Not all systems
- * support software pullup controls.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
-{
- int ret;
-
- if (!gadget->ops->pullup)
- return -EOPNOTSUPP;
-
- if (gadget->deactivated) {
- /*
- * If gadget is deactivated we only save new state.
- * Gadget will stay disconnected after activation.
- */
- gadget->connected = false;
- return 0;
- }
-
- ret = gadget->ops->pullup(gadget, 0);
- if (!ret)
- gadget->connected = 0;
- return ret;
-}
-
-/**
- * usb_gadget_deactivate - deactivate function which is not ready to work
- * @gadget: the peripheral being deactivated
- *
- * This routine may be used during the gadget driver bind() call to prevent
- * the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_activate() is called. For example, user mode components may
- * need to be activated before the system can talk to hosts.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
-{
- int ret;
-
- if (gadget->deactivated)
- return 0;
-
- if (gadget->connected) {
- ret = usb_gadget_disconnect(gadget);
- if (ret)
- return ret;
- /*
- * If gadget was being connected before deactivation, we want
- * to reconnect it in usb_gadget_activate().
- */
- gadget->connected = true;
- }
- gadget->deactivated = true;
-
- return 0;
-}
-
-/**
- * usb_gadget_activate - activate function which is not ready to work
- * @gadget: the peripheral being activated
- *
- * This routine activates gadget which was previously deactivated with
- * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
static inline int usb_gadget_activate(struct usb_gadget *gadget)
-{
- if (!gadget->deactivated)
- return 0;
-
- gadget->deactivated = false;
-
- /*
- * If gadget has been connected before deactivation, or became connected
- * while it was being deactivated, we call usb_gadget_connect().
- */
- if (gadget->connected)
- return usb_gadget_connect(gadget);
-
- return 0;
-}
+{ return 0; }
+#endif /* CONFIG_USB_GADGET */
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index de3237fce..5ff9032ee 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -12,7 +12,7 @@
#include <linux/usb/phy.h>
#if IS_ENABLED(CONFIG_OF)
-enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np);
+enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
bool of_usb_host_tpl_support(struct device_node *np);
int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
@@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent,
int portnum);
#else
static inline enum usb_dr_mode
-of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
+of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
return USB_DR_MODE_UNKNOWN;
}
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 8297e5b34..9217169c6 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,6 +72,7 @@ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t,
extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
+extern bool current_in_userns(const struct user_namespace *target_ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -100,6 +101,11 @@ static inline bool userns_may_setgroups(const struct user_namespace *ns)
{
return true;
}
+
+static inline bool current_in_userns(const struct user_namespace *target_ns)
+{
+ return true;
+}
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 587480ad4..dd66a952e 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -27,8 +27,7 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
-extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags, unsigned long reason);
+extern int handle_userfault(struct fault_env *fe, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len);
@@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
-static inline int handle_userfault(struct vm_area_struct *vma,
- unsigned long address,
- unsigned int flags,
- unsigned long reason)
+static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
{
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b39a5f315..960bedbde 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 6e6cb0c9d..26c155bb6 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -149,6 +149,19 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+ /*
+ * Note the reverse polarity of the quirk feature (compared to most
+	 * other features); this is for compatibility with legacy systems.
+ */
+ return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
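
A transport would typically consult the helper when deciding whether buffers must go through the DMA API; a hedged sketch (helper name hypothetical):

	static bool example_use_dma_api(const struct virtio_device *vdev)
	{
		/* Quirked legacy devices pass guest-physical addresses directly. */
		return !virtio_has_iommu_quirk(vdev);
	}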
+
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
new file mode 100644
index 000000000..1c912f85e
--- /dev/null
+++ b/include/linux/virtio_net.h
@@ -0,0 +1,101 @@
+#ifndef _LINUX_VIRTIO_NET_H
+#define _LINUX_VIRTIO_NET_H
+
+#include <linux/if_vlan.h>
+#include <uapi/linux/virtio_net.h>
+
+static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ unsigned short gso_type = 0;
+
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ gso_type |= SKB_GSO_TCP_ECN;
+
+ if (hdr->gso_size == 0)
+ return -EINVAL;
+ }
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+
+ if (!skb_partial_csum_set(skb, start, off))
+ return -EINVAL;
+ }
+
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+
+ skb_shinfo(skb)->gso_size = gso_size;
+ skb_shinfo(skb)->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+ }
+
+ return 0;
+}
+
+static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ /* This is a hint as to how much should be linear. */
+ hdr->hdr_len = __cpu_to_virtio16(little_endian,
+ skb_headlen(skb));
+ hdr->gso_size = __cpu_to_virtio16(little_endian,
+ sinfo->gso_size);
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ return -EINVAL;
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ } else
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ if (skb_vlan_tag_present(skb))
+ hdr->csum_start = __cpu_to_virtio16(little_endian,
+ skb_checksum_start_offset(skb) + VLAN_HLEN);
+ else
+ hdr->csum_start = __cpu_to_virtio16(little_endian,
+ skb_checksum_start_offset(skb));
+ hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ skb->csum_offset);
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+ } /* else everything is zero */
+
+ return 0;
+}
+
+#endif /* _LINUX_VIRTIO_NET_H */
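
These helpers centralize conversions that tun, macvtap, and AF_PACKET previously open-coded. A hedged sketch of the receive direction (the skb is assumed already built; function name hypothetical):

	static int example_rx(struct sk_buff *skb, const struct virtio_net_hdr *hdr)
	{
		/* Validates GSO type/size and sets up partial-checksum state. */
		if (virtio_net_hdr_to_skb(skb, hdr, virtio_legacy_is_little_endian()))
			return -EINVAL;	/* malformed header */

		return 0;	/* skb now carries gso_size/gso_type/csum info */
	}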
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
new file mode 100644
index 000000000..9638bfeb0
--- /dev/null
+++ b/include/linux/virtio_vsock.h
@@ -0,0 +1,154 @@
+#ifndef _LINUX_VIRTIO_VSOCK_H
+#define _LINUX_VIRTIO_VSOCK_H
+
+#include <uapi/linux/virtio_vsock.h>
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/af_vsock.h>
+
+#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
+#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
+#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
+#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
+
+enum {
+ VSOCK_VQ_RX = 0, /* for host to guest data */
+ VSOCK_VQ_TX = 1, /* for guest to host data */
+ VSOCK_VQ_EVENT = 2,
+ VSOCK_VQ_MAX = 3,
+};
+
+/* Per-socket state (accessed via vsk->trans) */
+struct virtio_vsock_sock {
+ struct vsock_sock *vsk;
+
+ /* Protected by lock_sock(sk_vsock(trans->vsk)) */
+ u32 buf_size;
+ u32 buf_size_min;
+ u32 buf_size_max;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ /* Protected by tx_lock */
+ u32 tx_cnt;
+ u32 buf_alloc;
+ u32 peer_fwd_cnt;
+ u32 peer_buf_alloc;
+
+ /* Protected by rx_lock */
+ u32 fwd_cnt;
+ u32 rx_bytes;
+ struct list_head rx_queue;
+};
+
+struct virtio_vsock_pkt {
+ struct virtio_vsock_hdr hdr;
+ struct work_struct work;
+ struct list_head list;
+ void *buf;
+ u32 len;
+ u32 off;
+ bool reply;
+};
+
+struct virtio_vsock_pkt_info {
+ u32 remote_cid, remote_port;
+ struct msghdr *msg;
+ u32 pkt_len;
+ u16 type;
+ u16 op;
+ u32 flags;
+ bool reply;
+};
+
+struct virtio_transport {
+ /* This must be the first field */
+ struct vsock_transport transport;
+
+ /* Takes ownership of the packet */
+ int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+};
+
+ssize_t
+virtio_transport_stream_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len,
+ int type);
+int
+virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags);
+
+s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
+s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+
+int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ struct vsock_sock *psk);
+u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
+void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
+int
+virtio_transport_notify_poll_in(struct vsock_sock *vsk,
+ size_t target,
+ bool *data_ready_now);
+int
+virtio_transport_notify_poll_out(struct vsock_sock *vsk,
+ size_t target,
+ bool *space_available_now);
+
+int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
+ size_t target, ssize_t copied, bool data_read,
+ struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_send_init(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
+ ssize_t written, struct vsock_transport_send_notify_data *data);
+
+u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
+bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
+bool virtio_transport_stream_allow(u32 cid, u32 port);
+int virtio_transport_dgram_bind(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr);
+bool virtio_transport_dgram_allow(u32 cid, u32 port);
+
+int virtio_transport_connect(struct vsock_sock *vsk);
+
+int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);
+
+void virtio_transport_release(struct vsock_sock *vsk);
+
+ssize_t
+virtio_transport_stream_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+int
+virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ struct sockaddr_vm *remote_addr,
+ struct msghdr *msg,
+ size_t len);
+
+void virtio_transport_destruct(struct vsock_sock *vsk);
+
+void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+
+#endif /* _LINUX_VIRTIO_VSOCK_H */
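
A hedged sketch of the credit-based flow control implied by tx_cnt,
peer_fwd_cnt and peer_buf_alloc above: a sender may keep at most
peer_buf_alloc - (tx_cnt - peer_fwd_cnt) bytes in flight, and
virtio_transport_get_credit() grants no more than that. foo_stream_send() is
illustrative only:

        static ssize_t foo_stream_send(struct virtio_vsock_sock *vvs, u32 want)
        {
                u32 pkt_len;

                /* never exceed a single packet buffer */
                pkt_len = min_t(u32, want, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE);
                /* may grant less than asked; 0 means the peer is full */
                pkt_len = virtio_transport_get_credit(vvs, pkt_len);
                if (!pkt_len)
                        return 0;

                /* ... allocate a virtio_vsock_pkt of pkt_len bytes and hand
                 * it to the transport's send_pkt() callback; on allocation
                 * failure the grant must be released with
                 * virtio_transport_put_credit(vvs, pkt_len) ... */

                return pkt_len;
        }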
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ec084321f..4d6ec58a8 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -23,21 +23,23 @@
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
+ FOR_ALL_ZONES(ALLOCSTALL),
+ FOR_ALL_ZONES(PGSCAN_SKIP),
PGFREE, PGACTIVATE, PGDEACTIVATE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
- FOR_ALL_ZONES(PGREFILL),
- FOR_ALL_ZONES(PGSTEAL_KSWAPD),
- FOR_ALL_ZONES(PGSTEAL_DIRECT),
- FOR_ALL_ZONES(PGSCAN_KSWAPD),
- FOR_ALL_ZONES(PGSCAN_DIRECT),
+ PGREFILL,
+ PGSTEAL_KSWAPD,
+ PGSTEAL_DIRECT,
+ PGSCAN_KSWAPD,
+ PGSCAN_DIRECT,
PGSCAN_DIRECT_THROTTLE,
#ifdef CONFIG_NUMA
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
- PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+ PAGEOUTRUN, PGROTATED,
DROP_PAGECACHE, DROP_SLAB,
#ifdef CONFIG_NUMA_BALANCING
NUMA_PTE_UPDATES,
@@ -70,6 +72,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_FAULT_FALLBACK,
THP_COLLAPSE_ALLOC,
THP_COLLAPSE_ALLOC_FAILED,
+ THP_FILE_ALLOC,
+ THP_FILE_MAPPED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
@@ -100,4 +104,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_VM_EVENT_ITEMS
};
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; })
+#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; })
+#endif
+
#endif /* VM_EVENT_ITEM_H_INCLUDED */
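
The ({ BUILD_BUG(); 0; }) fallbacks let common code name the new THP counters
without an #ifdef, as long as the use sits behind a condition the compiler can
prove false; a minimal sketch:

        /* Builds with and without CONFIG_TRANSPARENT_HUGEPAGE: when it is
         * off, IS_ENABLED() is 0, the branch is discarded, and BUILD_BUG()
         * never fires. An unconditional use would break the build. */
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                count_vm_event(THP_FILE_ALLOC);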
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d2da8e053..613771909 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -101,25 +101,42 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_vmacache_event(x) do {} while (0)
#endif
-#define __count_zone_vm_events(item, zone, delta) \
- __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
- zone_idx(zone), delta)
+#define __count_zid_vm_events(item, zid, delta) \
+ __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
/*
- * Zone based page accounting with per cpu differentials.
+ * Zone and node-based page accounting with per cpu differentials.
*/
-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
+extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
static inline void zone_page_state_add(long x, struct zone *zone,
enum zone_stat_item item)
{
atomic_long_add(x, &zone->vm_stat[item]);
- atomic_long_add(x, &vm_stat[item]);
+ atomic_long_add(x, &vm_zone_stat[item]);
+}
+
+static inline void node_page_state_add(long x, struct pglist_data *pgdat,
+ enum node_stat_item item)
+{
+ atomic_long_add(x, &pgdat->vm_stat[item]);
+ atomic_long_add(x, &vm_node_stat[item]);
}
static inline unsigned long global_page_state(enum zone_stat_item item)
{
- long x = atomic_long_read(&vm_stat[item]);
+ long x = atomic_long_read(&vm_zone_stat[item]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+static inline unsigned long global_node_page_state(enum node_stat_item item)
+{
+ long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -160,32 +177,61 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
return x;
}
-#ifdef CONFIG_NUMA
+static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
+ enum node_stat_item item)
+{
+ long x = atomic_long_read(&pgdat->vm_stat[item]);
-extern unsigned long node_page_state(int node, enum zone_stat_item item);
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
-#else
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
-#define node_page_state(node, item) global_page_state(item)
+#ifdef CONFIG_NUMA
+extern unsigned long sum_zone_node_page_state(int node,
+ enum zone_stat_item item);
+extern unsigned long node_page_state(struct pglist_data *pgdat,
+ enum node_stat_item item);
+#else
+#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
+#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);
+void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
+void __inc_node_page_state(struct page *, enum node_stat_item);
+void __dec_node_page_state(struct page *, enum node_stat_item);
+
void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);
-extern void inc_zone_state(struct zone *, enum zone_stat_item);
+void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
+void inc_node_page_state(struct page *, enum node_stat_item);
+void dec_node_page_state(struct page *, enum node_stat_item);
+
+extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
@@ -213,16 +259,34 @@ static inline void __mod_zone_page_state(struct zone *zone,
zone_page_state_add(delta, zone, item);
}
+static inline void __mod_node_page_state(struct pglist_data *pgdat,
+ enum node_stat_item item, int delta)
+{
+ node_page_state_add(delta, pgdat, item);
+}
+
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
atomic_long_inc(&zone->vm_stat[item]);
- atomic_long_inc(&vm_stat[item]);
+ atomic_long_inc(&vm_zone_stat[item]);
+}
+
+static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+{
+ atomic_long_inc(&pgdat->vm_stat[item]);
+ atomic_long_inc(&vm_node_stat[item]);
}
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
atomic_long_dec(&zone->vm_stat[item]);
- atomic_long_dec(&vm_stat[item]);
+ atomic_long_dec(&vm_zone_stat[item]);
+}
+
+static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+{
+ atomic_long_dec(&pgdat->vm_stat[item]);
+ atomic_long_dec(&vm_node_stat[item]);
}
static inline void __inc_zone_page_state(struct page *page,
@@ -231,12 +295,26 @@ static inline void __inc_zone_page_state(struct page *page,
__inc_zone_state(page_zone(page), item);
}
+static inline void __inc_node_page_state(struct page *page,
+ enum node_stat_item item)
+{
+ __inc_node_state(page_pgdat(page), item);
+}
+
+
static inline void __dec_zone_page_state(struct page *page,
enum zone_stat_item item)
{
__dec_zone_state(page_zone(page), item);
}
+static inline void __dec_node_page_state(struct page *page,
+ enum node_stat_item item)
+{
+ __dec_node_state(page_pgdat(page), item);
+}
+
+
/*
* We only use atomic operations to update counters. So there is no need to
* disable interrupts.
@@ -245,7 +323,12 @@ static inline void __dec_zone_page_state(struct page *page,
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
+#define inc_node_page_state __inc_node_page_state
+#define dec_node_page_state __dec_node_page_state
+#define mod_node_page_state __mod_node_page_state
+
#define inc_zone_state __inc_zone_state
+#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state
#define set_pgdat_percpu_threshold(pgdat, callback) { }
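
The node counters mirror the zone API; a hedged sketch (assuming NR_FILE_PAGES
is an enum node_stat_item in this series; foo_* names are placeholders):

        static void foo_account_file_page(struct page *page)
        {
                /* irq-safe variant */
                inc_node_page_state(page, NR_FILE_PAGES);

                /* __-prefixed variant: caller keeps interrupts disabled */
                __dec_node_page_state(page, NR_FILE_PAGES);

                /* global sum across nodes, clamped to 0 under SMP drift */
                if (!global_node_page_state(NR_FILE_PAGES))
                        pr_debug("no file pages accounted\n");
        }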
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 8d7634247..6abd24f25 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -45,7 +45,7 @@ void poke_blanked_console(void);
int con_font_op(struct vc_data *vc, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
-void scrollback(struct vc_data *vc, int lines);
+void scrollback(struct vc_data *vc);
void scrollfront(struct vc_data *vc, int lines);
void clear_buffer_attributes(struct vc_data *vc);
void update_region(struct vc_data *vc, unsigned long start, int count);
@@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg);
#ifdef CONFIG_CONSOLE_TRANSLATIONS
/* consolemap.c */
-struct unimapinit;
struct unipair;
int con_set_trans_old(unsigned char __user * table);
int con_get_trans_old(unsigned char __user * table);
int con_set_trans_new(unsigned short __user * table);
int con_get_trans_new(unsigned short __user * table);
-int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui);
+int con_clear_unimap(struct vc_data *vc);
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list);
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list);
int con_set_default_unimap(struct vc_data *vc);
@@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table)
{
return -EINVAL;
}
-static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+static inline int con_clear_unimap(struct vc_data *vc)
{
return 0;
}
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index fa2196990..aa9bfea88 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -12,11 +12,9 @@ struct task_struct;
/*
* vtime_accounting_cpu_enabled() definitions/declarations
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
static inline bool vtime_accounting_cpu_enabled(void) { return true; }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
* Checks if vtime is enabled on some CPU. Cputime readers want to be careful
* in that case and compute the tickless cputime.
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void)
return false;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#endif
/*
@@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);
-#ifdef __ARCH_HAS_VTIME_ACCOUNT
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-#else
-extern void vtime_common_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
-{
- if (vtime_accounting_cpu_enabled())
- vtime_common_account_irq_enter(tsk);
-}
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
-
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
- if (vtime_accounting_cpu_enabled())
- vtime_gen_account_irq_exit(tsk);
-}
-
extern void vtime_user_enter(struct task_struct *tsk);
static inline void vtime_user_exit(struct task_struct *tsk)
@@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
- /* On hard|softirq exit we always account to hard|softirq cputime */
- vtime_account_system(tsk);
-}
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+ /* On hard|softirq exit we always account to hard|softirq cputime */
+ vtime_account_system(tsk);
+}
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+#endif
+
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
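
For context, a hedged sketch of how the two hook families are combined on irq
entry (the actual combining wrapper lives outside this hunk; foo_* is a
placeholder):

        static inline void foo_account_irq_enter(struct task_struct *tsk)
        {
                vtime_account_irq_enter(tsk);   /* empty stub unless NATIVE */
                irqtime_account_irq(tsk);       /* empty stub unless IRQ_TIME_ACCOUNTING */
        }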
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 27d7a0ab5..c3ff74d76 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -600,6 +600,19 @@ do { \
__ret; \
})
+#define __wait_event_killable_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
+ schedule())
+
+#define wait_event_killable_exclusive(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_killable_exclusive(wq, condition); \
+ __ret; \
+})
+
#define __wait_event_freezable_exclusive(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
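
A hedged usage sketch for the new macro (foo_* identifiers are placeholders):
the exclusive flag wakes a single waiter per event, and TASK_KILLABLE means
only fatal signals abort the sleep:

        static int foo_claim_slot(struct foo_dev *dev)
        {
                int ret;

                ret = wait_event_killable_exclusive(dev->wq,
                                                    foo_slot_free(dev));
                if (ret)
                        return ret;     /* -ERESTARTSYS: fatal signal */
                /* the condition held when we were woken; slot is ours */
                return 0;
        }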
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 51732d6c9..7047bc7f8 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -66,7 +66,8 @@ struct watchdog_ops {
* as configurable from user space. Only relevant if
* max_hw_heartbeat_ms is not provided.
* @min_hw_heartbeat_ms:
- * Minimum time between heartbeats, in milli-seconds.
+ * Hardware limit for minimum time between heartbeats,
+ * in milli-seconds.
* @max_hw_heartbeat_ms:
* Hardware limit for maximum timeout, in milli-seconds.
* Replaces max_timeout if specified.
@@ -180,4 +181,7 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+/* devres register variant */
+int devm_watchdog_register_device(struct device *dev, struct watchdog_device *);
+
#endif /* ifndef _LINUX_WATCHDOG_H */
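
A hedged probe sketch for the devres variant (foo_* identifiers are
placeholders); the point of devm_watchdog_register_device() is that the
remove path needs no matching unregister call:

        static int foo_wdt_probe(struct platform_device *pdev)
        {
                struct watchdog_device *wdd;

                wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
                if (!wdd)
                        return -ENOMEM;

                wdd->info = &foo_wdt_info;
                wdd->ops = &foo_wdt_ops;
                wdd->min_timeout = 1;
                wdd->max_timeout = 60;

                return devm_watchdog_register_device(&pdev->dev, wdd);
        }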
diff --git a/include/linux/wbt.h b/include/linux/wbt.h
index 872da2d73..68ba75e3a 100644
--- a/include/linux/wbt.h
+++ b/include/linux/wbt.h
@@ -6,8 +6,24 @@
#include <linux/timer.h>
#include <linux/ktime.h>
-#define ISSUE_STAT_MASK (1ULL << 63)
-#define ISSUE_STAT_TIME_MASK ~ISSUE_STAT_MASK
+enum wbt_flags {
+ WBT_TRACKED = 1, /* write, tracked for throttling */
+ WBT_READ = 2, /* read */
+ WBT_KSWAPD = 4, /* write, from kswapd */
+
+ WBT_NR_BITS = 3, /* number of bits */
+};
+
+enum {
+ /*
+ * Set aside 3 bits for state, rest is a time stamp
+ */
+ ISSUE_STAT_SHIFT = 64 - WBT_NR_BITS,
+ ISSUE_STAT_MASK = ~((1ULL << ISSUE_STAT_SHIFT) - 1),
+ ISSUE_STAT_TIME_MASK = ~ISSUE_STAT_MASK,
+
+ WBT_NUM_RWQ = 2,
+};
struct wb_issue_stat {
u64 time;
@@ -24,26 +40,42 @@ static inline u64 wbt_issue_stat_get_time(struct wb_issue_stat *stat)
return stat->time & ISSUE_STAT_TIME_MASK;
}
-static inline void wbt_mark_tracked(struct wb_issue_stat *stat)
+static inline void wbt_clear_state(struct wb_issue_stat *stat)
{
- stat->time |= ISSUE_STAT_MASK;
+ stat->time &= ISSUE_STAT_TIME_MASK;
}
-static inline void wbt_clear_tracked(struct wb_issue_stat *stat)
+static inline enum wbt_flags wbt_stat_to_mask(struct wb_issue_stat *stat)
{
- stat->time &= ~ISSUE_STAT_MASK;
+ return (stat->time & ISSUE_STAT_MASK) >> ISSUE_STAT_SHIFT;
}
-static inline bool wbt_tracked(struct wb_issue_stat *stat)
+static inline void wbt_track(struct wb_issue_stat *stat, enum wbt_flags wb_acct)
{
- return (stat->time & ISSUE_STAT_MASK) != 0;
+ stat->time |= ((u64) wb_acct) << ISSUE_STAT_SHIFT;
+}
+
+static inline bool wbt_is_tracked(struct wb_issue_stat *stat)
+{
+ return (stat->time >> ISSUE_STAT_SHIFT) & WBT_TRACKED;
+}
+
+static inline bool wbt_is_read(struct wb_issue_stat *stat)
+{
+ return (stat->time >> ISSUE_STAT_SHIFT) & WBT_READ;
}
struct wb_stat_ops {
void (*get)(void *, struct blk_rq_stat *);
+ bool (*is_current)(struct blk_rq_stat *);
void (*clear)(void *);
};
+struct rq_wait {
+ wait_queue_head_t wait;
+ atomic_t inflight;
+};
+
struct rq_wb {
/*
* Settings that govern how we throttle
@@ -51,10 +83,8 @@ struct rq_wb {
unsigned int wb_background; /* background writeback */
unsigned int wb_normal; /* normal writeback */
unsigned int wb_max; /* max throughput writeback */
- unsigned int scale_step;
-
- u64 win_nsec; /* default window size */
- u64 cur_win_nsec; /* current window size */
+ int scale_step;
+ bool scaled_max;
/*
* Number of consecutive periods where we don't have enough
@@ -62,6 +92,9 @@ struct rq_wb {
*/
unsigned int unknown_cnt;
+ u64 win_nsec; /* default window size */
+ u64 cur_win_nsec; /* current window size */
+
struct timer_list window_timer;
s64 sync_issue;
@@ -74,19 +107,27 @@ struct rq_wb {
unsigned long last_comp; /* last non-throttled comp */
unsigned long min_lat_nsec;
struct backing_dev_info *bdi;
- struct request_queue *q;
- wait_queue_head_t wait;
- atomic_t inflight;
+ struct rq_wait rq_wait[WBT_NUM_RWQ];
struct wb_stat_ops *stat_ops;
void *ops_data;
};
+static inline unsigned int wbt_inflight(struct rq_wb *rwb)
+{
+ unsigned int i, ret = 0;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ ret += atomic_read(&rwb->rq_wait[i].inflight);
+
+ return ret;
+}
+
struct backing_dev_info;
-void __wbt_done(struct rq_wb *);
+void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct wb_issue_stat *);
-bool wbt_wait(struct rq_wb *, unsigned int, spinlock_t *);
+enum wbt_flags wbt_wait(struct rq_wb *, unsigned int, spinlock_t *);
struct rq_wb *wbt_init(struct backing_dev_info *, struct wb_stat_ops *, void *);
void wbt_exit(struct rq_wb *);
void wbt_update_limits(struct rq_wb *);
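
A hedged illustration of the packing these masks define: the top WBT_NR_BITS
of ->time hold wbt_flags, the rest the issue timestamp (foo_mark_issued is
illustrative only):

        static void foo_mark_issued(struct wb_issue_stat *stat, u64 now_ns,
                                    enum wbt_flags acct)
        {
                stat->time = now_ns & ISSUE_STAT_TIME_MASK;     /* time bits */
                wbt_track(stat, acct);                          /* flag bits */

                WARN_ON_ONCE(wbt_stat_to_mask(stat) != acct);
                WARN_ON_ONCE(wbt_issue_stat_get_time(stat) !=
                             (now_ns & ISSUE_STAT_TIME_MASK));
        }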
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ca73c503b..26cc1df28 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
#endif
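
A hedged sketch of how these callbacks would slot into the cpuhp state
machine (the real wiring belongs in the core hotplug tables;
CPUHP_WORKQUEUE_PREP and the init hook below are assumptions for
illustration):

        static int __init foo_wire_wq_hotplug(void)
        {
                /* the prepare step runs on a control CPU before the new CPU
                 * comes up; no teardown counterpart for this stage */
                return cpuhp_setup_state_nocalls(CPUHP_WORKQUEUE_PREP,
                                                 "workqueue:prepare",
                                                 workqueue_prepare_cpu, NULL);
        }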
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 6e4a35aca..e53abf2bf 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -100,14 +100,14 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_cmd(struct writeback_control *wbc)
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
if (wbc->sync_mode == WB_SYNC_ALL)
return WRITE_SYNC;
else if (wbc->for_kupdate || wbc->for_background)
return WRITE_BG;
- return WRITE;
+ return 0;
}
/*
@@ -330,7 +330,7 @@ void laptop_mode_timer_fn(unsigned long data);
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
-bool zone_dirty_ok(struct zone *zone);
+bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
@@ -394,4 +394,7 @@ void tag_pages_for_writeback(struct address_space *mapping,
void account_page_redirty(struct page *page);
+void sb_mark_inode_writeback(struct inode *inode);
+void sb_clear_inode_writeback(struct inode *inode);
+
#endif /* WRITEBACK_H */
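
A hedged sketch of a filesystem using the renamed helper when building a
writeback bio; the return value is now a set of flags OR'd into the request
(0 for plain writes) rather than a WRITE op (foo_submit_wb_bio is
illustrative only):

        static void foo_submit_wb_bio(struct bio *bio,
                                      struct writeback_control *wbc)
        {
                bio_set_op_attrs(bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
                submit_bio(bio);
        }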
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 760399a47..2bb5deb00 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -173,14 +173,14 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
- if (!config_enabled(CONFIG_PROVE_LOCKING))
+ if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
* lockdep will normally handle this,
* but fail without anyway
*/
ctx->done_acquire = 1;
- if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+ if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
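
The replacement pattern above generalizes: IS_ENABLED() is the documented
macro, is usable in plain C expressions, and also covers =m for tristate
options (for these boolean lockdep options the behavior is unchanged). A
trivial hedged sketch:

        static bool foo_lock_debugging(void)
        {
                return IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC);
        }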